diff --git a/APP_Framework/Framework/Kconfig b/APP_Framework/Framework/Kconfig
index 6b119431..b770d0a3 100644
--- a/APP_Framework/Framework/Kconfig
+++ b/APP_Framework/Framework/Kconfig
@@ -22,6 +22,7 @@ menu "Framework"
     source "$APP_DIR/Framework/connection/Kconfig"
     source "$APP_DIR/Framework/knowing/Kconfig"
     source "$APP_DIR/Framework/control/Kconfig"
+    source "$APP_DIR/Framework/security/Kconfig"
 
 
 endmenu
diff --git a/APP_Framework/Framework/Makefile b/APP_Framework/Framework/Makefile
index 128587c8..a095e8ae 100644
--- a/APP_Framework/Framework/Makefile
+++ b/APP_Framework/Framework/Makefile
@@ -16,6 +16,9 @@ ifeq ($(CONFIG_SUPPORT_CONTROL_FRAMEWORK),y)
     SRC_DIR += control
 endif
 
 
+ifeq ($(CONFIG_CRYPTO),y)
+    SRC_DIR += security
+endif
 
 include $(KERNEL_ROOT)/compiler.mk
diff --git a/Ubiquitous/XiUOS/framework/security/Kconfig b/APP_Framework/Framework/security/Kconfig
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/Kconfig
rename to APP_Framework/Framework/security/Kconfig
diff --git a/Ubiquitous/XiUOS/framework/security/Makefile b/APP_Framework/Framework/security/Makefile
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/Makefile
rename to APP_Framework/Framework/security/Makefile
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/Makefile b/APP_Framework/Framework/security/crypto/Makefile
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/Makefile
rename to APP_Framework/Framework/security/crypto/Makefile
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/include/bignum.h b/APP_Framework/Framework/security/crypto/include/bignum.h
similarity index 95%
rename from Ubiquitous/XiUOS/framework/security/crypto/include/bignum.h
rename to APP_Framework/Framework/security/crypto/include/bignum.h
index 18b3c62f..2dee89dd 100755
--- a/Ubiquitous/XiUOS/framework/security/crypto/include/bignum.h
+++ b/APP_Framework/Framework/security/crypto/include/bignum.h
@@ -27,7 +27,7 @@
 #include
 #include
 #include
-#include
+#include
 
 #define BIGNUMBER_SIZE_8WORD 8
 #define BIGNUMBER_SIZE_16WORD 16
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/include/ecc.h b/APP_Framework/Framework/security/crypto/include/ecc.h
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/include/ecc.h
rename to APP_Framework/Framework/security/crypto/include/ecc.h
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/include/join.h b/APP_Framework/Framework/security/crypto/include/join.h
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/include/join.h
rename to APP_Framework/Framework/security/crypto/include/join.h
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/include/qn.h b/APP_Framework/Framework/security/crypto/include/qn.h
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/include/qn.h
rename to APP_Framework/Framework/security/crypto/include/qn.h
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/include/sm3.h b/APP_Framework/Framework/security/crypto/include/sm3.h
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/include/sm3.h
rename to APP_Framework/Framework/security/crypto/include/sm3.h
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/include/sm4.h b/APP_Framework/Framework/security/crypto/include/sm4.h
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/include/sm4.h
rename to APP_Framework/Framework/security/crypto/include/sm4.h
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/include/sm9.h b/APP_Framework/Framework/security/crypto/include/sm9.h
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/include/sm9.h
rename to APP_Framework/Framework/security/crypto/include/sm9.h
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/include/sm9_para.h b/APP_Framework/Framework/security/crypto/include/sm9_para.h
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/include/sm9_para.h
rename to APP_Framework/Framework/security/crypto/include/sm9_para.h
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/include/sm9_test.h b/APP_Framework/Framework/security/crypto/include/sm9_test.h
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/include/sm9_test.h
rename to APP_Framework/Framework/security/crypto/include/sm9_test.h
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/include/sm9_util.h b/APP_Framework/Framework/security/crypto/include/sm9_util.h
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/include/sm9_util.h
rename to APP_Framework/Framework/security/crypto/include/sm9_util.h
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/sm3/sm3.c b/APP_Framework/Framework/security/crypto/sm3/sm3.c
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/sm3/sm3.c
rename to APP_Framework/Framework/security/crypto/sm3/sm3.c
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/sm3/sm3_hmac.c b/APP_Framework/Framework/security/crypto/sm3/sm3_hmac.c
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/sm3/sm3_hmac.c
rename to APP_Framework/Framework/security/crypto/sm3/sm3_hmac.c
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/sm4/sm4_common.c b/APP_Framework/Framework/security/crypto/sm4/sm4_common.c
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/sm4/sm4_common.c
rename to APP_Framework/Framework/security/crypto/sm4/sm4_common.c
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/sm4/sm4_enc_mode.c b/APP_Framework/Framework/security/crypto/sm4/sm4_enc_mode.c
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/sm4/sm4_enc_mode.c
rename to APP_Framework/Framework/security/crypto/sm4/sm4_enc_mode.c
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/sm4/sms4_enc.c b/APP_Framework/Framework/security/crypto/sm4/sms4_enc.c
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/sm4/sms4_enc.c
rename to APP_Framework/Framework/security/crypto/sm4/sms4_enc.c
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/sm4/sms4_lcl.h b/APP_Framework/Framework/security/crypto/sm4/sms4_lcl.h
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/sm4/sms4_lcl.h
rename to APP_Framework/Framework/security/crypto/sm4/sms4_lcl.h
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/sm4/sms4_setkey.c b/APP_Framework/Framework/security/crypto/sm4/sms4_setkey.c
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/sm4/sms4_setkey.c
rename to APP_Framework/Framework/security/crypto/sm4/sms4_setkey.c
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/sm9/bignum.c b/APP_Framework/Framework/security/crypto/sm9/bignum.c
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/sm9/bignum.c
rename to APP_Framework/Framework/security/crypto/sm9/bignum.c
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/sm9/ecc.c b/APP_Framework/Framework/security/crypto/sm9/ecc.c
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/sm9/ecc.c
rename to APP_Framework/Framework/security/crypto/sm9/ecc.c
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/sm9/join.c b/APP_Framework/Framework/security/crypto/sm9/join.c
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/sm9/join.c
rename to APP_Framework/Framework/security/crypto/sm9/join.c
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/sm9/qn.c b/APP_Framework/Framework/security/crypto/sm9/qn.c
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/sm9/qn.c
rename to APP_Framework/Framework/security/crypto/sm9/qn.c
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/sm9/sm9.c b/APP_Framework/Framework/security/crypto/sm9/sm9.c
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/sm9/sm9.c
rename to APP_Framework/Framework/security/crypto/sm9/sm9.c
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/sm9/sm9_para.c b/APP_Framework/Framework/security/crypto/sm9/sm9_para.c
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/sm9/sm9_para.c
rename to APP_Framework/Framework/security/crypto/sm9/sm9_para.c
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/sm9/sm9_util.c b/APP_Framework/Framework/security/crypto/sm9/sm9_util.c
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/sm9/sm9_util.c
rename to APP_Framework/Framework/security/crypto/sm9/sm9_util.c
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/test/sm3_test.c b/APP_Framework/Framework/security/crypto/test/sm3_test.c
similarity index 98%
rename from Ubiquitous/XiUOS/framework/security/crypto/test/sm3_test.c
rename to APP_Framework/Framework/security/crypto/test/sm3_test.c
index 20f92821..ab103a75 100755
--- a/Ubiquitous/XiUOS/framework/security/crypto/test/sm3_test.c
+++ b/APP_Framework/Framework/security/crypto/test/sm3_test.c
@@ -19,7 +19,7 @@
  */
 
 #include
-#include
+#include
 
 void sm3_test_case(){
     uint8_t result[SM3_DIGEST_LENGTH] = { 0 };
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/test/sm4_test.c b/APP_Framework/Framework/security/crypto/test/sm4_test.c
similarity index 99%
rename from Ubiquitous/XiUOS/framework/security/crypto/test/sm4_test.c
rename to APP_Framework/Framework/security/crypto/test/sm4_test.c
index ca0cdd8b..c46aee1f 100755
--- a/Ubiquitous/XiUOS/framework/security/crypto/test/sm4_test.c
+++ b/APP_Framework/Framework/security/crypto/test/sm4_test.c
@@ -19,7 +19,7 @@
  */
 
 #include
-#include
+#include
 
 void sm4_test_case(){
     unsigned char ukey[16] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe ,0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 };
diff --git a/Ubiquitous/XiUOS/framework/security/crypto/test/sm9_test.c b/APP_Framework/Framework/security/crypto/test/sm9_test.c
similarity index 100%
rename from Ubiquitous/XiUOS/framework/security/crypto/test/sm9_test.c
rename to APP_Framework/Framework/security/crypto/test/sm9_test.c
diff --git a/APP_Framework/lib/app_newlib/fs_syscalls.c b/APP_Framework/lib/app_newlib/fs_syscalls.c
index 0b9cae77..59402915 100644
--- a/APP_Framework/lib/app_newlib/fs_syscalls.c
+++ b/APP_Framework/lib/app_newlib/fs_syscalls.c
@@ -96,12 +96,3 @@ _ssize_t _write_r(struct _reent *ptr, int fd, const void *buf, size_t nbytes)
 {
     return write(fd, buf, nbytes);
 }
-
-void exit(int __status);
-void _exit(int __status){}
-void _kill(int k)
-{}
-int _getpid(void)
-{
-
-}
\ No newline at end of file
diff --git a/Ubiquitous/XiUOS/framework/Kconfig b/Ubiquitous/XiUOS/framework/Kconfig
deleted file mode 100644
index 6fe49559..00000000
--- a/Ubiquitous/XiUOS/framework/Kconfig
+++ /dev/null
@@ -1,15 +0,0 @@
-menu "APP Framework"
-
-source "$KERNEL_DIR/framework/connection/Kconfig"
-
-source "$KERNEL_DIR/framework/intelligent/Kconfig"
-
-source "$KERNEL_DIR/framework/control/Kconfig"
-
-source "$KERNEL_DIR/lib/Kconfig"
-
-source "$KERNEL_DIR/framework/security/Kconfig"
-
-
-
-endmenu
diff --git a/Ubiquitous/XiUOS/framework/Makefile b/Ubiquitous/XiUOS/framework/Makefile
deleted file mode 100644
index a829539e..00000000
--- a/Ubiquitous/XiUOS/framework/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-SRC_DIR := intelligent security
-
-include $(KERNEL_ROOT)/compiler.mk
diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/Kconfig b/Ubiquitous/XiUOS/framework/connection/Adapter/Kconfig
deleted file mode 100644
index 4e72451a..00000000
--- a/Ubiquitous/XiUOS/framework/connection/Adapter/Kconfig
+++ /dev/null
@@ -1,31 +0,0 @@
-menuconfig CONNECTION_COMMUNICATION_WIFI
-    bool "Enable Wifi"
-    default n
-
-menuconfig CONNECTION_COMMUNICATION_LORA
-bool "Enable LORA"
-default n
-if CONNECTION_COMMUNICATION_LORA
-source "$KERNEL_DIR/framework/connection/Adapter/lora/Kconfig"
-endif
-
-menuconfig CONNECTION_COMMUNICATION_ETHERNET
-    bool "Enable Ethernet"
-    default n
-
-menuconfig CONNECTION_COMMUNICATION_ZIGBEE
-    bool "Enable Zigbee"
-    default n
-
-
-menuconfig CONNECTION_COMMUNICATION_NB_IOT
-    bool "Enable NB-IOT"
-    default n
-
-menuconfig CONNECTION_COMMUNICATION_4G
-    bool "Enable 4G"
-    default n
-
-menuconfig CONNECTION_COMMUNICATION_BLUETOOTH
-    bool "Enable BlueTooth"
-    default n
diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/Makefile b/Ubiquitous/XiUOS/framework/connection/Adapter/Makefile
deleted file mode 100644
index 7b936696..00000000
--- a/Ubiquitous/XiUOS/framework/connection/Adapter/Makefile
+++ /dev/null
@@ -1,32 +0,0 @@
-
-SRC_DIR := src
-
-ifeq ($(CONFIG_CONNECTION_COMMUNICATION_LORA), y)
-SRC_DIR += lora
-endif
-
-ifeq ($(CONFIG_CONNECTION_COMMUNICATION_ETHERNET), y)
-SRC_DIR += ethernet
-endif
-
-ifeq ($(CONFIG_CONNECTION_COMMUNICATION_WIFI), y)
-SRC_DIR += wifi
-endif
-
-ifeq ($(CONFIG_CONNECTION_COMMUNICATION_ZIGBEE), y)
-    SRC_DIR += zigbee
-endif
-
-ifeq ($(CONFIG_CONNECTION_COMMUNICATION_NB_IOT), y)
-    SRC_DIR += nbiot
-endif
-
-# ifeq ($(CONFIG_CONNECTION_COMMUNICATION_4G), y)
-# SRC_DIR += 4G
-# endif
-
-ifeq ($(CONFIG_CONNECTION_COMMUNICATION_BLUETOOTH), y)
-SRC_DIR += bluetooth
-endif
-
-include $(KERNEL_ROOT)/compiler.mk
\ No newline at end of file
diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/bluetooth/Makefile b/Ubiquitous/XiUOS/framework/connection/Adapter/bluetooth/Makefile
deleted file mode 100644
index 1959f2d3..00000000
--- a/Ubiquitous/XiUOS/framework/connection/Adapter/bluetooth/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-SRC_FILES := xs_adapter_bluetooth.c xs_adaper_bluetooth_register.c
-
-include $(KERNEL_ROOT)/compiler.mk
\ No newline at end of file
diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/bluetooth/xs_adaper_bluetooth_register.c b/Ubiquitous/XiUOS/framework/connection/Adapter/bluetooth/xs_adaper_bluetooth_register.c
deleted file mode 100644
index 72a31d65..00000000
--- a/Ubiquitous/XiUOS/framework/connection/Adapter/bluetooth/xs_adaper_bluetooth_register.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
-* Copyright (c) 2020 AIIT XUOS Lab
-* XiUOS is licensed under Mulan PSL v2.
-* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ - -/** -* @file: xs_AdaperBluetooth_register.c -* @brief: register Bluetooth in initialization -* @version: 1.0 -* @author: AIIT XUOS Lab -* @date: 2021/4/30 -* -*/ -#include -#include -#include -/* initialize to the register list*/ -int RegisterAdapterBluetooth(void) -{ - static struct AdapterBluetooth bluetooth_adapter; - memset(&bluetooth_adapter, 0, sizeof(bluetooth_adapter)); - - static struct AdapterDone bluetooth_send_done = { - .NetAiitOpen = BluetoothOpen, - .NetAiitClose = BluetoothClose, - .NetAiitSend = BluetoothSend, - .NetAiitReceive = BluetoothReceive, - .NetAiitJoin = NULL, - .NetAiitIoctl = NULL, - }; - bluetooth_adapter.parent.done = bluetooth_send_done; - bluetooth_adapter.name = "Bluetooth"; - - BluetoothAdapterInit(); - BluetoothAdapterRegister((adapter_t)&bluetooth_adapter); - - return EOK; -} \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/bluetooth/xs_adapter_bluetooth.c b/Ubiquitous/XiUOS/framework/connection/Adapter/bluetooth/xs_adapter_bluetooth.c deleted file mode 100644 index b652c192..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/bluetooth/xs_adapter_bluetooth.c +++ /dev/null @@ -1,155 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. 
-*/ - -/** -* @file: xs_Adapterxs_adapter_bluetooth.c -* @brief: bluetooth open close function -* @version: 1.0 -* @author: AIIT XUOS Lab -* @date: 2021/4/30 -* -*/ -#include -#include "xs_adapter_bluetooth.h" -#include -#include -#include -#include - -#ifdef BOARD_K210_EVB -#define SAMPLE_UART_NAME "/dev/uart3_dev3" -#endif -#ifdef BOARD_STM32F407_EVB -#define SAMPLE_UART_NAME "/dev/usart3_dev3" -#endif - -static int serial_fd; -static int32_t bluetooth_receive; -static int rx_sem; -char bluetooth_buffer[NAME_NUM_MAX ]={0}; - -/* initialize srial port to open bluetooth*/ -int BluetoothOpen(struct Adapter *padapter) -{ - - /* Open device in read-write mode */ - serial_fd = open(SAMPLE_UART_NAME,O_RDWR); - - /* set serial config, serial_baud_rate = 115200 */ - - struct SerialDataCfg cfg; - cfg.serial_baud_rate = BAUD_RATE_115200; - cfg.serial_data_bits = DATA_BITS_8; - cfg.serial_stop_bits = STOP_BITS_1; - cfg.serial_parity_mode = PARITY_NONE; - cfg.serial_bit_order = 0; - cfg.serial_invert_mode = 0; - cfg.serial_buffer_size = 128; - - ioctl(serial_fd, 0, &cfg); - UserTaskDelay(1000); - printf("Bluetooth ready\n"); - - return 0; -} - -/* send message to srial port*/ -int BluetoothSend(struct Adapter *padapter, const char* data, int len, bool block, int time_out, int delay, send_success cb, void* param, void* reserved) -{ - write(serial_fd,data,strlen(data)); - - return 0; -} - - -/*thread to read message from srial port*/ -void SerialThreadEntry(void *parameter) -{ - char ch; - int i = 0; - int n; - int run = 0; - while (1){ - n = read(serial_fd,&ch,1); - if (n>0){ - if (ch == '~'){ - UserSemaphoreAbandon(rx_sem); - run = 1; - break; - } - bluetooth_buffer[i++] = ch; - } - if (run ==1) - break; - } -} - -int BluetoothReceive(struct Adapter *padapter, char* rev_buffer, int buffer_len,int time_out, bool block, void* reserved) -{ - - x_err_t ret = EOK; - /* Set callback function */ - /* Create thread serial */ - UtaskType recv; - recv.name[0] = 'z'; - recv.func_entry = SerialThreadEntry; - recv.func_param = NONE; - recv.stack_size = 1024; - recv.prio = 25; - memset(bluetooth_buffer, 0, sizeof(bluetooth_buffer)); - - /* Initialize semaphore */ - rx_sem = UserSemaphoreCreate(0); - - bluetooth_receive = UserTaskCreate(recv); - UserTaskStartup(bluetooth_receive); - - /*copy to the receive buffer*/ - UserSemaphoreObtain(rx_sem,-1); - memcpy(rev_buffer,bluetooth_buffer,strlen(bluetooth_buffer)+1 ); - - return ret; - -} - -void BluetoothSettingDemo(int argc, char *argv[]) -{ - - adapter_t padapter = BluetoothAdapterFind("Bluetooth"); - if (NONE == padapter){ - KPrintf("adapter find failed!\n"); - return; - } - /*Open adapter*/ - if (0 != padapter->done.NetAiitOpen(padapter)){ - KPrintf("adapter open failed!\n"); - return; - } - - BluetoothOpen(padapter); - /*Bluetooth communication settings*/ - /*it can be changed if needed*/ - char* set5 = "AT"; - write(serial_fd,set5,strlen(set5)); - UserTaskDelay(1000); - printf("bluetooth setting success!\n"); -} -#ifndef SEPARATE_COMPILE -SHELL_EXPORT_CMD(SHELL_CMD_PERMISSION(0)|SHELL_CMD_TYPE(SHELL_TYPE_CMD_MAIN), -BluetoothSettingDemo, BluetoothSettingDemo, bluetooth send function ); -#endif - -void BluetoothClose(struct Adapter *padapter) -{ - UserTaskDelete(bluetooth_receive); - close(serial_fd); -} diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/ethernet/Makefile b/Ubiquitous/XiUOS/framework/connection/Adapter/ethernet/Makefile deleted file mode 100644 index c2d4f5d7..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/ethernet/Makefile 
+++ /dev/null @@ -1,5 +0,0 @@ -SRC_FILES += xs_adapterAT_ethernet.c \ - xs_adapterAT_ethernet_register.c - - -include $(KERNEL_ROOT)/compiler.mk \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/ethernet/xs_adapterAT_ethernet.c b/Ubiquitous/XiUOS/framework/connection/Adapter/ethernet/xs_adapterAT_ethernet.c deleted file mode 100644 index 6fb52249..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/ethernet/xs_adapterAT_ethernet.c +++ /dev/null @@ -1,409 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ - -/** - * @file xs_AdapterAT_ethernet.c - * @brief Structure and function declarations of the connection ethernet - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ -#include -#include -#include -#include -#include -#include - -/** - * @description: Close ethernet - * @param padapter - ethernet device pointer - */ -void EthernetClose(struct Adapter *padapter) -{ -} - -/** - * @description: open ethernet - * @param padapter - ethernet device pointer - */ -int EthernetOpen(struct Adapter *padapter) -{ - char *agent_name = "uart3_client"; - const char *device_name = ETHERNET_UART_NAME; - - uint32 result; - if (InitATAgent(agent_name, device_name, 512, NULL)){ - printf("InitATAgent failed ! \n"); - result = -ERROR; - return result; - } - - ATAgentType at_agent = GetATAgent(agent_name); - if (NULL == at_agent){ - printf("GetATAgent failed ! \n"); - return -ERROR; - } - UserTaskDelay(5000); - struct AdapterAT *ethernetAT_adapter = (struct AdapterAT *)padapter; - ethernetAT_adapter->agent = at_agent; - return EOK; -} - -int EthernetSend(struct Adapter *padapter, const char *data, int len, bool block, int time_out, int delay, send_success cb, void *param, void* p) -{ - struct AdapterAT *ethernetAT_adapter = (struct AdapterAT *)padapter; - - if (ethernetAT_adapter->agent){ - EntmSend(ethernetAT_adapter->agent, data, len); - } -} - -int EthernetReceive(struct Adapter *padapter, char *rev_buffer, int buffer_len, int time_out, bool block, void* p) -{ - printf("ethernet receive waiting ... \n"); - - struct AdapterAT *ethernetAT_adapter = (struct AdapterAT *)padapter; - if (ethernetAT_adapter->agent){ - if (EntmRecv(ethernetAT_adapter->agent, rev_buffer, buffer_len, 40000)) - printf("EntmRecv failed ! \n"); - }else{ - printf("Can not find agent \n"); - } -} - -uint32 EthernetInitAtCmd(ATAgentType at_agent) -{ - uint32 result; - - ATReplyType reply = CreateATReply(64); - if (NULL == reply){ - printf("CreateATReply failed ! 
\n"); - result = -ERROR; - goto __exit; - } - - ATOrderSend(at_agent, REPLY_TIME_OUT, NULL, "+++"); - UserTaskDelay(100); - - - ATOrderSend(at_agent, REPLY_TIME_OUT, NULL, "a"); - - UserTaskDelay(500); - - return result; - -__exit: - if (reply) - DeleteATReply(reply); - - return result; -} - -static void EthernetSetUpAdapter(void *parameter) -{ - #define INIT_RETRY 5 - #define LEN_PARA_BUF 128 - uint8 server_addr_wifi[LEN_PARA_BUF]="192.168.1.183"; - uint8 server_port_wifi[LEN_PARA_BUF]="12345"; - uint8 WIFI_ssid[LEN_PARA_BUF]="DDST 2"; - uint8 WIFI_pwd[LEN_PARA_BUF]="passw0rd"; - char cmd[LEN_PARA_BUF]; - - struct AdapterAT *adapterAT = (struct AdapterAT *) parameter; - - //struct at_device_esp8266 *esp8266 = (struct at_device_esp8266 *) device->UserData; - struct ATAgent *agent = adapterAT->agent; - ATReplyType reply = NONE; - x_err_t result = EOK; - x_size_t retry_num = INIT_RETRY; - - //DBG("%s device initialize start.", adapterAT->parent.); - - /* wait hfa21 device startup finish */ - UserTaskDelay(5000); - - reply = CreateATReply(64); - if (reply == NONE){ - printf("no memory for reply create."); - return; - } - - while (retry_num--){ - ATOrderSend(agent, REPLY_TIME_OUT, NULL, "+++"); - UserTaskDelay(100); - - ATOrderSend(agent, REPLY_TIME_OUT, NULL, "a"); - UserTaskDelay(2500); - - ATOrderSend(agent, REPLY_TIME_OUT, NULL, "AT+FCLR\r"); - UserTaskDelay(30000); - - ATOrderSend(agent, REPLY_TIME_OUT, NULL, "+++"); - UserTaskDelay(100); - - ATOrderSend(agent, REPLY_TIME_OUT, NULL, "a"); - UserTaskDelay(2500); - - memset(cmd,0,sizeof(cmd)); - strcpy(cmd,"AT+WSSSID="); - strcat(cmd,WIFI_ssid); - strcat(cmd,"\r"); - ATOrderSend(agent, REPLY_TIME_OUT, NULL, cmd); - UserTaskDelay(2500); - - memset(cmd,0,sizeof(cmd)); - strcpy(cmd,"AT+WSKEY=WPA2PSK,AES,"); - strcat(cmd,WIFI_pwd); - strcat(cmd,"\r"); - ATOrderSend(agent, REPLY_TIME_OUT, NULL, cmd); - UserTaskDelay(2500); - - memset(cmd,0,sizeof(cmd)); - strcpy(cmd,"AT+WMODE=sta\r"); - ATOrderSend(agent, REPLY_TIME_OUT, NULL, cmd); - UserTaskDelay(2500); - - - memset(cmd,0,sizeof(cmd)); - strcpy(cmd,"AT+NETP=TCP,CLIENT,"); - strcat(cmd,server_port_wifi); - strcat(cmd,","); - strcat(cmd,server_addr_wifi); - strcat(cmd,"\r"); - // strcpy(cmd,"AT+NETP\r"); - ATOrderSend(agent, REPLY_TIME_OUT, NULL, cmd); - UserTaskDelay(2500); - - memset(cmd,0,sizeof(cmd)); - strcat(cmd,"AT+Z\r"); - ATOrderSend(agent, REPLY_TIME_OUT, NULL, cmd); - UserTaskDelay(2500); - - /* initialize successfully */ - result = EOK; - break; - - __exit: - if (result != EOK) - UserTaskDelay(1000); - } - - if (reply) - DeleteATReply(reply); -} - -int EthernetSetUp(struct AdapterAT *adapterAT) -{ - EthernetSetUpAdapter(adapterAT); - - return EOK; -} - -int EthernetDHCP(struct AdapterAT *adapterAT, bool enable) -{ - int result = EOK; - ATReplyType reply = NONE; - char dhcp_status[4]; - memset(dhcp_status,0,sizeof(dhcp_status)); - if(enable) - strcpy(dhcp_status,"on"); - else - strcpy(dhcp_status,"off"); - - reply = CreateATReply(64); - if (reply == NONE){ - printf("no memory for reply struct."); - return -ENOMEMORY; - } - - /* send dhcp set commond "AT+CWDHCP_CUR=," and wait response */ - if (ATOrderSend(adapterAT->agent, REPLY_TIME_OUT, reply, "AT+DHCPDEN=%s", dhcp_status) < 0){ - result = -ERROR; - goto __exit; - } - - -__exit: - if (reply) - DeleteATReply(reply); - - return result; -} - -int EthernetPing(struct AdapterAT *adapterAT, const char *destination,struct PingResult *ping_resp) -{ - char *ping_result = NONE; - ping_result = (char *) UserCalloc(1, 17); - - 
EthernetInitAtCmd(adapterAT->agent); - - uint32 result = EOK; - - ATReplyType reply = CreateATReply(64); - if (NULL == reply){ - printf("CreateATReply failed ! \n"); - result = -ERROR; - goto __exit; - } - - printf("\\r = 0x%x, \\n = 0x%x\n", '\r', '\n'); - - //ping baidu.com - uint32 err = ATOrderSend(adapterAT->agent, REPLY_TIME_OUT, reply, "AT+PING=%s", "192.168.250.240\r"); - if (err){ - printf("at_obj_exec_cmd(AT+PING)failed ! err = %d\n", err); - result = -ERROR; - goto __exit; - } - - UserTaskDelay(2500); - - ATOrderSend(adapterAT->agent, REPLY_TIME_OUT, NULL, "AT+Z\r"); - UserTaskDelay(2000); - - - const char * result_buf = GetReplyText(reply); - if(!result_buf){ - printf("send_dhcp_at_cmd AT+ result_buf = NULL"); - result = -ERROR; - goto __exit; - } - - char* str = strstr(result_buf, "+ok="); - printf("str is:%s\n",str); - - ParseATReply(str, "+ok=%s\r", ping_result); - - printf("ping result is:%s\n", ping_result); - -__exit: - if (reply) - DeleteATReply(reply); - return result; -} - -int EthernetNetstat(struct AdapterAT *adapterAT) -{ -#define HFA21_NETSTAT_RESP_SIZE 320 -#define HFA21_NETSTAT_TYPE_SIZE 10 -#define HFA21_NETSTAT_IPADDR_SIZE 17 -#define HFA21_NETP_EXPRESSION "+ok=%[^,],%[^,],%d,%s\r" -#define HFA21_WANN_EXPRESSION "+ok=%[^,],%[^,],%[^,],%[^,]\r" -#define HFA21_LANN_EXPRESSION "+ok=%[^,],%[^,]\r" -#define HFA21_WMODE_EXPRESSION "+ok=%s\r" - - ATReplyType reply = NULL; - struct ATAgent *agent = adapterAT->agent; - uint32 result; - char * result_buf = NULL; - char * str = NULL; - - /* sta/ap */ - char *work_mode = NULL; - /* dhcp/static */ - char *ip_mode = NULL; - char *local_ipaddr = NULL; - char *gateway = NULL; - char *netmask = NULL; - local_ipaddr = (char *) UserCalloc(1, HFA21_NETSTAT_IPADDR_SIZE); - gateway = (char *) UserCalloc(1, HFA21_NETSTAT_IPADDR_SIZE); - netmask = (char *) UserCalloc(1, HFA21_NETSTAT_IPADDR_SIZE); - work_mode = (char *) UserCalloc(1, HFA21_NETSTAT_IPADDR_SIZE); - ip_mode = (char *) UserCalloc(1, HFA21_NETSTAT_IPADDR_SIZE); - - reply = CreateATReply(HFA21_NETSTAT_RESP_SIZE); - if (reply == NULL) - goto __exit; - - ATOrderSend(agent, REPLY_TIME_OUT, NULL, "+++"); - UserTaskDelay(100); - - ATOrderSend(agent, REPLY_TIME_OUT, NULL, "a"); - UserTaskDelay(2500); - - if (ATOrderSend(agent, REPLY_TIME_OUT, reply, "AT+WMODE\r") < 0) - goto __exit; - - UserTaskDelay(2500); - - result_buf = GetReplyText(reply); - if(!result_buf){ - printf("send_dhcp_at_cmd AT+ result_buf = NULL"); - result = -ERROR; - goto __exit; - } - - str = strstr(result_buf, "+ok="); - printf("str is:%s\n",str); - /* parse the third line of response data, get the network connection information */ - ParseATReply(str, HFA21_WMODE_EXPRESSION, work_mode); - - if(work_mode[0]=='S'){ - if (ATOrderSend(agent, REPLY_TIME_OUT, reply, "AT+WANN\r") < 0) - goto __exit; - - UserTaskDelay(2500); - - result_buf = GetReplyText(reply); - if(!result_buf){ - printf("send_dhcp_at_cmd AT+ result_buf = NULL"); - result = -ERROR; - goto __exit; - } - - str = strstr(result_buf, "+ok="); - printf("str is:%s\n",str); - /* parse the third line of response data, get the network connection information */ - ParseATReply(str, HFA21_WANN_EXPRESSION, ip_mode, local_ipaddr, netmask, gateway); - }else{ - if (ATOrderSend(agent, REPLY_TIME_OUT, reply, "AT+LANN\r") < 0) - goto __exit; - - UserTaskDelay(2500); - - result_buf = GetReplyText(reply); - if(!result_buf){ - printf("send_dhcp_at_cmd AT+ result_buf = NULL"); - result = -ERROR; - goto __exit; - } - - str = strstr(result_buf, "+ok="); - printf("str 
is:%s\n",str); - /* parse the third line of response data, get the network connection information */ - ParseATReply(str, HFA21_LANN_EXPRESSION, local_ipaddr, netmask); - } - - ATOrderSend(adapterAT->agent, REPLY_TIME_OUT, NULL, "AT+Z\r"); - UserTaskDelay(2500); - - printf("work mode: %s\n", work_mode); - if(work_mode[0]=='S') - printf("ip mode: %s\nlocal ip: %s\nnetmask: %s\ngateway: %s\n", ip_mode, local_ipaddr, netmask, gateway); - else - printf("local ip: %s\nnetmask: %s\n", local_ipaddr, netmask); - - return EOK; - -__exit: - if (reply) - DeleteATReply(reply); - if (local_ipaddr) - UserFree(local_ipaddr); - if (netmask) - UserFree(netmask); - if (gateway) - UserFree(gateway); - if (work_mode) - UserFree(work_mode); -} \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/ethernet/xs_adapterAT_ethernet_register.c b/Ubiquitous/XiUOS/framework/connection/Adapter/ethernet/xs_adapterAT_ethernet_register.c deleted file mode 100644 index af457da0..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/ethernet/xs_adapterAT_ethernet_register.c +++ /dev/null @@ -1,70 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ - -/** - * @file xs_AdapterAT_ethernet_register.c - * @brief Structure and function declarations of the ethernet register - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#include -#include -#include -#include -#include - - -const struct AdapterDone EthernetAdapterDone = -{ - EthernetClose, - EthernetOpen, - NULL, - EthernetSend, - EthernetReceive, - NULL, -}; - -const struct ATDone EthernetATDone = -{ - EthernetSetUp, - NULL, - NULL, - NULL, - EthernetDHCP, - EthernetPing, - EthernetNetstat, - NULL, -}; - -int RegisterAdapterEthernet(void) -{ - struct AdapterEthernet *ethernet_adapter = malloc(sizeof(struct AdapterEthernet)); - if (ethernet_adapter == NULL){ - printf("out of memory\n"); - return ERROR; - } - - struct AdapterAT *ethernetAT_adapter = (struct AdapterAT *)ethernet_adapter; - struct Adapter *adapter = (struct Adapter *)ethernet_adapter; - - ethernet_adapter->parent.atdone = EthernetATDone; - ethernet_adapter->parent.parent.done = EthernetAdapterDone; - - ethernetAT_adapter->at_adapter_id = ETHERNET_ADAPTER_ID; - - ATAdapterInit(); - ATAdapterRegister(ethernetAT_adapter); - - return EOK; -} \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter.h b/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter.h deleted file mode 100644 index 5a0a89bd..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter.h +++ /dev/null @@ -1,72 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. 
-* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ - -/** - * @file xs_adapter.h - * @brief Adapter interface - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#ifndef XS_ADAPTER_N -#define XS_ADAPTER_N - -#include -#include -#include - - - -enum AdapterType { - ADAPTER_LORA = 0, /* Lora */ - ADAPTER_4G , /* 4G */ - ADAPTER_NBIOT , /* NBIot */ - ADAPTER_WIFI , /* WIFI */ - ADAPTER_ETHERNET , /* ETHERNET */ - ADAPTER_BLUETOOTH , /* BLUETOOTH */ - ADAPTER_ZIGBEE , /* ZIGBEE */ - ADAPTER_5G , /* 5G */ -}; -enum MeshType{ - NET_P2P = 0, - NET_ADHOC_SINGLE_GROUP, - NET_ADHOC_MULTI_GROUP, -}; - -enum NetRoleType{ - ROLE_TYPE_SLAVE = 1, - ROLE_TYPE_MASTER, - ROLE_TYPE_NONE, -}; - -struct Adapter; -struct AdapterDone { - void (*NetAiitClose)(struct Adapter *padapter); - int (*NetAiitOpen)(struct Adapter *padapter); - int (*NetAiitJoin)(struct Adapter *padapter, int dev_type, char* net_id); - int (*NetAiitSend)(struct Adapter *padapter, const char* data, int len, bool block, int time_out, int delay, send_success cb, void* param, void* reserved); - int (*NetAiitReceive)(struct Adapter *padapter, char* rev_buffer, int buffer_len,int time_out, bool block, void* reserved); - int (*NetAiitIoctl)(struct Adapter *padapter, int cmd, void *arg); -}; - -struct Adapter -{ - enum AdapterType type; /* type of adapter, such as lora adapter */ - enum NetRoleType net_role_type; - enum MeshType mesh_type; - struct AdapterDone done; /* socket-like APIs for data transferring */ - struct SysDoubleLinklistNode link; /* link list node */ -}; -typedef struct Adapter *adapter_t; - -#endif \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at.h b/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at.h deleted file mode 100644 index d4ba8c04..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at.h +++ /dev/null @@ -1,94 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. 
-*/ - -/** - * @file xs_adapter_at.h - * @brief AdapterAT interface - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#ifndef XS_ADAPTER_AT_H -#define XS_ADAPTER_AT_H - -#include "xs_adapter.h" -#include - -#define SOCKET_STATUS_UNUSED (0) -#define SOCKET_STATUS_INIT (1) -#define SOCKET_STATUS_CONNECT (2) - -#define SOCKET_TYPE_DGRAM (0) -#define SOCKET_TYPE_STREAM (1) - -#define SOCKET_PROTOCOL_TCP (6) -#define SOCKET_PROTOCOL_UDP (17) - -#define NET_TYPE_AF_INET (0) -#define NET_TYPE_AF_INET6 (1) - -#define SOCKET_MAX 8 - -struct Socket -{ - uint8_t fd; - uint8_t status ; - struct AddressIpv4 src_ip; - uint16_t src_port; - struct AddressIpv4 dst_ip; - uint16_t dst_port; - uint8_t type; - uint8_t af_type; - uint8_t protocal; - uint8 is_client; -}; - -struct AdapterAT; -struct ATDone -{ - int (*ATOperateUp)(struct AdapterAT *adapterAT); - int (*ATOperateDown)(struct AdapterAT *adapterAT); - int (*ATOperateAddr)(struct AdapterAT *adapterAT, uint ip, uint gateway, uint netmask); - int (*ATOperateDns)(struct AdapterAT *adapterAT, struct AddressIpv4 *dns_addr, uint8 dns_count); - int (*ATOperateDHCP)(struct AdapterAT *adapterAT, bool enable); - int (*ATPing)(struct AdapterAT *adapterAT, const char *destination, struct PingResult *ping_resp); - int (*ATNetstat)(struct AdapterAT *adapterAT); - int (*ATOperateDefault)(struct AdapterAT *adapterAT); - - int (*ATSocketCreate)(struct AdapterAT *adapterAT , uint8_t socket_fd , uint8_t type , uint8_t af_type ); - int (*ATSocketConnect)(struct AdapterAT *adapterAT , uint8_t socket_fd , struct AddressIpv4 dst_ip , uint16_t dst_port, uint8 is_client); - int (*ATSocketClose)(struct AdapterAT *adapterAT , uint8_t socket_fd ); -}; - -struct AdapterAT { - struct Adapter parent; - struct ATAgent *agent; - struct AddressIpv4 ip; - struct AddressIpv4 netmask; - struct AddressIpv4 gateway; - struct AddressIpv4 dns[DNS_COUNT]; - uint32 at_adapter_id; - struct Socket socket[SOCKET_MAX]; - uint8 hardware_address_len; - uint8 hardware_address[HW_MAX]; - uint16 flags; - uint16 mtu; - - void (*change_cb )(struct AdapterAT *adapterAT, enum CbType type, enum ChangeType ch_type_); - - struct ATDone atdone; - - struct SysDoubleLinklistNode link; -}; - -#endif \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at_agent.h b/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at_agent.h deleted file mode 100644 index 71f1ee2e..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at_agent.h +++ /dev/null @@ -1,81 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. 
-*/ - -/** - * @file xs_adapter_at_agent.h - * @brief AT proxy, auto receive AT reply and transparency data - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#ifndef XS_ADAPTER_AT_CLIENT_H -#define XS_ADAPTER_AT_CLIENT_H - -#include -#include -#include - -enum ReceiveMode -{ - ENTM_MODE = 1, - AT_MODE = 2, -}; - -struct ATReply -{ - char *reply_buffer; - uint32 reply_max_len; - uint32 reply_len; -}; -typedef struct ATReply *ATReplyType; - -struct ATAgent -{ - char agent_name[NAME_NUM_MAX]; - int fd; - - char *maintain_buffer; - uint32 maintain_len; - uint32 maintain_max; - - int32 lock; - - ATReplyType reply; - int rsp_sem; - - int32 at_handler; - - #define ENTM_RECV_MAX 256 - char entm_recv_buf[ENTM_RECV_MAX]; - uint32 entm_recv_len; - enum ReceiveMode receive_mode; - int entm_rx_notice; -}; -typedef struct ATAgent *ATAgentType; - -int EntmSend(ATAgentType agent, const char *data, int len); -int EntmRecv(ATAgentType agent, char *rev_buffer, int buffer_len, int time_out); -char *GetReplyText(ATReplyType reply); -ATReplyType CreateATReply(uint32 reply_max_len); -uint IpTint(char *ipstr); -void SwapStr(char *str, int begin, int end); -char* IpTstr(uint ipint); -ATAgentType GetATAgent(const char *agent_name); -int InitATAgent(const char *agent_name, const char *device_name, uint32 maintain_max, struct SerialCfgParam* pserial_cfg); -int ParseATReply(char* str, const char *format, ...); -void DeleteATReply(ATReplyType reply); -int ATOrderSend(ATAgentType agent, uint32 timeout, ATReplyType reply, const char *cmd_expr, ...); - -#define REPLY_TIME_OUT 3000 - -#endif \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at_ethernet.h b/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at_ethernet.h deleted file mode 100644 index c2ad2358..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at_ethernet.h +++ /dev/null @@ -1,48 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. 
-*/ - -/** - * @file xs_adapter_at_ethernet.h - * @brief Structure and function declarations of the connection ethernet - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#ifndef XS_ADAPTER_AT_ETHERNET_H -#define XS_ADAPTER_AT_ETHERNET_H - -#include "xs_adapter.h" -#include "xs_adapter_at.h" -#include "xs_adapter_def.h" -#include "xs_klist.h" - -struct AdapterEthernet { - struct AdapterAT parent; /* inherit from Adapter */ - - char vendor_name[NAME_LEN_MAX]; - char product_ID_ethernet[NAME_LEN_MAX]; - - struct SingleLinklistNode link; -}; - -void EthernetClose(struct Adapter *padapter); -int EthernetOpen(struct Adapter *padapter); -int EthernetSend(struct Adapter *padapter, const char *data, int len, bool block, int time_out, int delay, send_success cb, void *param, void *p); -int EthernetReceive(struct Adapter *padapter, char *rev_buffer, int buffer_len, int time_out, bool block, void *p); - -int EthernetSetUp(struct AdapterAT *adapterAT); -int EthernetDHCP(struct AdapterAT *adapterAT, bool enable); -int EthernetPing(struct AdapterAT *adapterAT, const char *destination, struct PingResult *ping_resp); -int EthernetNetstat(struct AdapterAT *adapterAT); - -#endif \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at_nbiot.h b/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at_nbiot.h deleted file mode 100644 index 115dc050..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at_nbiot.h +++ /dev/null @@ -1,57 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. 
-*/ - -/** - * @file xs_adapter_at_nbiot.h - * @brief Structure and function declarations of the connection NBIoT - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#ifndef XS_ADAPTER_AT_NBIOT_H -#define XS_ADAPTER_AT_NBIOT_H - -#include "xs_adapter.h" -#include "xs_adapter_at.h" -#include "xs_adapter_at_agent.h" -#include "xs_adapter_def.h" - -#include -#include -#include - -#define MAX_SOCKET_NUM 8 - -#define SOCKET_STATUS_UNUSED (0) -#define SOCKET_STATUS_INIT (1) -#define SOCKET_STATUS_CONNECT (2) - -struct AdapterNBIoT { - struct AdapterAT parent; /* inherit from Adapter */ - - char vendor_name[NAME_LEN_MAX]; - char product_ID_ethernet[NAME_LEN_MAX]; - - struct SingleLinklistNode link; -}; - -int NbiotOpen(struct Adapter *padapter); - -int NbiotSend(struct Adapter *padapter, const char* data, int len, bool block, int time_out, int delay, send_success cb, void* param, void* reserved); -int NBIotRecv(struct Adapter *padapter, char *rev_buffer, int buffer_len, int time_out, bool block); - -int NBIoTSocketConnect(struct AdapterAT *adapterAT , uint8_t socket_fd , struct AddressIpv4 dst_ip , uint16_t dst_port, uint8 is_client); -int NBIoTSocketCreate(struct AdapterAT *adapterAT, uint8_t socket_fd, uint8_t type, uint8_t af_type ); -int NBIoTSocketClose(struct AdapterAT *adapterAT, uint8_t socket_fd ); - -#endif \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at_wifi.h b/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at_wifi.h deleted file mode 100644 index c1b77b81..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_at_wifi.h +++ /dev/null @@ -1,50 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. 
-*/ - -/** - * @file xs_adapter_at_wifi.h - * @brief Structure and function declarations of the connection wifi - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#ifndef XS_ADAPTER_AT_WIFI_H -#define XS_ADAPTER_AT_WIFI_H - -#include "xs_adapter.h" -#include "xs_adapter_at.h" -#include "xs_adapter_def.h" -#include "xs_klist.h" - -struct Adapterwifi { - struct AdapterAT parent; /* inherit from Adapter */ - - char vendor_name[NAME_LEN_MAX]; - char product_id_wifi[NAME_LEN_MAX]; - - struct SingleLinklistNode link; -}; - -void WifiClose(struct Adapter *padapter); -int WifiOpen(struct Adapter *padapter); -int WifiSend(struct Adapter *padapter, const char *data, int len, bool block, int time_out, int delay, send_success cb, void *param, void *p); -int WifiReceive(struct Adapter *padapter, char *rev_buffer, int buffer_len, int time_out, bool block, void *p); - -int WifiSetUp(struct AdapterAT *adapter_at); -int WifiSetDown(struct AdapterAT *adapter_at); -int WifiSetAddr(struct AdapterAT *adapter_at, uint ip, uint gateway, uint netmask); -int WifiDHCP(struct AdapterAT *adapter_at, bool enable); -int WifiPing(struct AdapterAT *adapter_at, const char *destination,struct PingResult *ping_resp); -int WifiNetstat(struct AdapterAT *adapter_at); - -#endif \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_bluetooth.h b/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_bluetooth.h deleted file mode 100644 index 872ce0a1..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_bluetooth.h +++ /dev/null @@ -1,41 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ - -/** -* @file: xs_adapter_bluetooth.h -* @brief: head file -* @version: 1.0 -* @author: AIIT XUOS Lab -* @date: 2021/4/25 -* -*/ -#ifndef XS_ADAPTER_BLUETOOTH_H -#define XS_ADAPTER_BLUETOOTH_H -#include "xs_adapter.h" - -struct AdapterBluetooth { - struct Adapter parent; /* inherit from Adapter */ - const char * name; /* name of the adapter instance */ - - const char * device_type; /* type of the adapter instance */ - - -}; -typedef struct AdapterBluetooth *AdapterBluetooth_t; - -int BluetoothOpen(struct Adapter *padapter); -int BluetoothSend(struct Adapter *padapter, const char* data, int len, bool block, int time_out, int delay, send_success cb, void* param, void* reserved); -int BluetoothReceive(struct Adapter *padapter, char* rev_buffer, int buffer_len,int time_out, bool block, void* reserved); -void BluetoothClose(struct Adapter *padapter); - - -#endif diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_def.h b/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_def.h deleted file mode 100644 index dd053347..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_def.h +++ /dev/null @@ -1,77 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. 
-* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ - -/** - * @file xs_adapter_def.h - * @brief defines about adapter - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#ifndef __XS_ADAPTER_DEF_H__ -#define __XS_ADAPTER_DEF_H__ - -#include "stdbool.h" - -typedef void(*send_success)(void* param); - -#define NAME_LEN_MAX 32 - -#define IPV6__ADDRESS_COUNT 3U -#define DNS_COUNT 2U -#define HW_MAX 8U - -enum CbType -{ - CB_ADDR_IP, /* IP address */ - CB_ADDR_NETMASK, /* subnet mask */ - CB_ADDR_GATEWAY, /* netmask */ - CB_ADDR_DNS_SERVER, /* dns server */ - CB_STATUS_UP, /* changed to 'up' */ - CB_STATUS_DOWN, /* changed to 'down' */ - CB_STATUS_LINK_UP, /* changed to 'link up' */ - CB_STATUS_LINK_DOWN, /* changed to 'link down' */ - CB_STATUS_INTERNET_UP, /* changed to 'internet up' */ - CB_STATUS_INTERNET_DOWN, /* changed to 'internet down' */ - CB_STATUS_DHCP_ENABLE, /* enable DHCP capability */ - CB_STATUS_DHCP_DISABLE, /* disable DHCP capability */ -}; - -enum ChangeType -{ - ADDR_CHANGE, - STATUS_CHANGE, -}; - -struct AddressIpv4 -{ - uint32 ipv4; -}; - -struct PingResult -{ - struct AddressIpv4 ip_addr; /* response IP address */ - uint16 data_len; /* response data length */ - uint16 ttl; /* time to live */ - uint32 ticks; /* response time, unit tick */ - void *user_data; /* user-specific data */ -}; - -#define NBIOT_ADAPTER_ID 0x02U -#define ETHERNET_ADAPTER_ID 0x03U -#define WIFI_ADAPTER_ID 0x04U -#define fourG_ADAPTER_ID 0x05U - - - -#endif \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_lora.h b/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_lora.h deleted file mode 100644 index 72302708..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_lora.h +++ /dev/null @@ -1,184 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. 
-*/ - -/** - * @file xs_adapterLora.h - * @brief lora adhoc logic - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#ifndef XS_ADAPTER_LORA_H -#define XS_ADAPTER_LORA_H - -#include "xs_adapter.h" - -#define DEVNAME_LEN_MAX 32 -#define NETNAME_LEN_MAX 32 -#define CONNECTED_CLIENTS_MAX 512 - -#define CLIENT_SEND_CELL_LEN 120 - - -struct AdapterLora { - struct Adapter parent; /* inherit from Adapter */ - const char * name; /* name of the adapter instance */ - - const char *deve_ui; /* Lora specific value */ - const char *app_key; /* Lora specific value */ - - int spi_lora_fd; -}; -typedef struct AdapterLora *AdapterLoraT; - -int LoraAdapterOpen(adapter_t padapter); -void LoraAdapterCose(struct Adapter *padapter); -int LoraAdapterSend(struct Adapter *padapter, const char* data, int len, bool block, int time_out, int delay, void *p); -int LoraAdapterReceive(adapter_t padapter, char* rev_buffer, int buffer_len,int time_out, bool block, void *p); -int LoraAdapterJoin(adapter_t sadapter, int dev_type, char* net_id); -int LoraAdapterSendc2g(adapter_t padapter, const char *data, int len, bool block, int time_out, int delay, send_success cb, void* param, void *p); - -// Client state machine -enum LoraClientStatus -{ - LORA_CLIENT_IDLE = 0, - LORA_CLIENT_LOOKING4GATEWAY, - LORA_CLIENT_CONNECTING2GATEWAY, - LORA_CLIENT_CONNECTED, - LORA_CLIENT_WAITTING_FOR_DISCONNECTED, - LORA_CLIENT_DISCONNECTED, -}; - -struct LoraClientStatusInfo -{ - enum LoraClientStatus status; - int user_id; - char gateway_name[DEVNAME_LEN_MAX]; - char client_name[DEVNAME_LEN_MAX]; -}; - -enum LoraOpcode -{ - LORA_OPCODE_START = 0xFFF0, - LORA_JOIN_REQ = 0xFFF1, - LORA_JOIN_RSP = 0xFFF2, - LORA_HANDSHAKE_REQ = 0xFFF3, - LORA_HANDSHAKE_RSP = 0xFFF4, - LORA_C2G_DATA_REQ = 0xFFF5, - LORA_C2G_DATA_RSP = 0xFFF6, - LORA_CLOSE_REQ = 0xFFF7, - LORA_CLOSE_RSP = 0xFFF8, - LORA_OPCODE_END, -}; - -enum WorkThreadStatus -{ - WORK_THREAD_RX = 1, - WORK_THREAD_TX, -}; - -// pkg header -typedef struct -{ - int op_code; - int length; -}LoraHeader; - -// Network access, handshake function protocol -typedef struct -{ - char net_id[NETNAME_LEN_MAX]; -}LoraProtoJoinReq; - -typedef struct -{ - char net_id[NETNAME_LEN_MAX]; - char gateway_name[DEVNAME_LEN_MAX]; - int signal_strength; - int error_code; -}LoraProtoJoinRsp; - -typedef struct -{ - char client_name[DEVNAME_LEN_MAX]; - char gateway_name[DEVNAME_LEN_MAX]; -}LoraProtoHandshakeReq; - -typedef struct -{ - char client_name[DEVNAME_LEN_MAX]; - char gateway_name[DEVNAME_LEN_MAX]; - int user_id; - int error_code; -}LoraProtoHandshakeRsp; - - -// Data transmission protocol -typedef struct -{ - int user_id; - int pkg_id; - int data_len; - int crc; -}LoraProtoC2GDataReq; - -typedef struct -{ - int user_id; - int ack_id; - int data_len; - int crc; -}LoraProtoC2GDataRsp; - -// Client active disconnect -typedef struct -{ - int user_id; -}LoraProtoCloseReq; - -typedef struct -{ - int user_id; - int error_code; -}LoraProtoCloseRsp; - -typedef struct -{ - char* data; - int len; - bool* prsp_flag; - adapter_t padapter; -}CheckRspParam; - -typedef void(*send_success)(void* param); -typedef struct -{ - bool has_data; - int crc; - int pkg_id; - char data[CLIENT_SEND_CELL_LEN]; - int data_len; - send_success callback; - void* param; -}ClientSendCell; - -typedef struct -{ - char user_name[DEVNAME_LEN_MAX]; - int user_id; - DoubleLinklistType link; -}OnlineUser; - - -#endif - diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_manager.h 
b/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_manager.h deleted file mode 100644 index a9646d99..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_manager.h +++ /dev/null @@ -1,43 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ - -/** - * @file xs_adapter_manager.h - * @brief manager adapter list - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#ifndef ADAPTER_MANAGER_H_ -#define ADAPTER_MANAGER_H_ -#include "xs_adapter.h" -#include - -void LoraAdapterInit(); -void LoraAdapterRegister(adapter_t padapter); -void* LoraAdapterFind(char* name); - - -void ATAdapterInit(); -void ATAdapterRegister(struct AdapterAT* at_adapter); -void* ATAdapterFind(uint32 adapter_id); - -void ZigbeeAdapterInit(); -void ZigbeeAdapterRegister(adapter_t padapter); -void* ZigbeeAdapterFind(char* name); - -void BluetoothAdapterInit(); -void BluetoothAdapterRegister(adapter_t padapter); -void* BluetoothAdapterFind(char* name); - -#endif \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_zigbee.h b/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_zigbee.h deleted file mode 100644 index e802297c..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/include/xs_adapter_zigbee.h +++ /dev/null @@ -1,41 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ - -/** -* @file: xs_adapter_zigbee.h -* @brief: head file -* @version: 1.0 -* @author: AIIT XUOS Lab -* @date: 2021/4/25 -* -*/ -#ifndef XS_ADAPTER_ZIGBEE_H -#define XS_ADAPTER_ZIGBEE_H -#include "xs_adapter.h" - -struct AdapterZigbee { - struct Adapter parent; /* inherit from Adapter */ - const char * name; /* name of the adapter instance */ - - const char * device_type; /* type of the adapter instance */ - - -}; -typedef struct AdapterZigbee *adapterZigbee_t; - -int ZigbeeOpen(struct Adapter *padapter); -int ZigbeeSend(struct Adapter *padapter, const char* data, int len, bool block, int time_out, int delay, send_success cb, void* param, void* reserved); -int ZigbeeReceive(struct Adapter *padapter, char* rev_buffer, int buffer_len,int time_out, bool block, void* reserved); -void ZigbeeClose(struct Adapter *padapter); - - -#endif diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/lora/Kconfig b/Ubiquitous/XiUOS/framework/connection/Adapter/lora/Kconfig deleted file mode 100644 index cf7c1be1..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/lora/Kconfig +++ /dev/null @@ -1,30 +0,0 @@ -#LORA adhoc configuration -#Set same netID, and then join one adhoc net. 
- -menuconfig CONNECTION_COMMUNICATION_BOOTSTART_LORA_NET_SAMPLE - bool "use bootstart lora net sample" - default n - - if CONNECTION_COMMUNICATION_BOOTSTART_LORA_NET_SAMPLE - menuconfig CONNECTION_COMMUNICATION_SET_AS_LORA_CLIENT - bool "set this as lora client " - default n - - if CONNECTION_COMMUNICATION_SET_AS_LORA_CLIENT - config CONNECTION_COMMUNICATION_LORA_CLIENT_NAME - string "config lora net client name" - default "lora_client_name0" - - config CONNECTION_COMMUNICATION_LORA_CLIENT_PKG_COUNT - int "config lora send pkg count" - default 1000 - endif - - menuconfig CONNECTION_COMMUNICATION_SET_AS_LORA_GATEWAY - bool "set this as lora gateway" - default n - - config CONNECTION_COMMUNICATION_LORA_NET_ID - string "config lora net ID" - default "intelligence grain" - endif diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/lora/Makefile b/Ubiquitous/XiUOS/framework/connection/Adapter/lora/Makefile deleted file mode 100644 index 15159716..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/lora/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -SRC_FILES := xs_adapter_lora.c - -include $(KERNEL_ROOT)/compiler.mk diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/lora/xs_adapter_lora.c b/Ubiquitous/XiUOS/framework/connection/Adapter/lora/xs_adapter_lora.c deleted file mode 100644 index 1ee255a9..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/lora/xs_adapter_lora.c +++ /dev/null @@ -1,849 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ - -/** - * @file xs_AdapterLora.c - * @brief lora adhoc logic - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#include -#include "spi_lora_sx12xx.h" -#include "radio.h" -#include "sx1276-LoRa.h" -#include -#include -#include -#include -#include - -#define SLEEP_MS 50 - -enum NetRoleType _role_type = ROLE_TYPE_NONE; - -bool gateway_listening = false; -bool client_work_thread_running = false; - -char gateway_net_id[NETNAME_LEN_MAX]; -char client_net_id[NETNAME_LEN_MAX]; - -struct LoraClientStatusInfo g_client_status_info; -int pkg_id_c2g; - -#define BUFFER_LEN_MAX 120 -char rec_buffer[BUFFER_LEN_MAX] = {0}; -char client_rec_buffer[BUFFER_LEN_MAX] = {0}; - -char send_buffer[BUFFER_LEN_MAX] = {0}; - -DoubleLinklistType online_user_head = {&online_user_head, &online_user_head}; - -// Client send buffer. 
Only one element is set temporarily -ClientSendCell client_send_buffer; -int pclient_send_buffer_mutex = -1; -enum WorkThreadStatus g_work_thread_status; -bool need_send_data = false; - -int32 ServerTask = NONE; -int32 ClientTask = NONE; - -static int spi_lora_fd_intern = -1; - -extern tRadioDriver *Radio; - -/** - * @description: open lora adapter - * @param padapter - lora adapter pointer - */ -int LoraAdapterOpen(adapter_t padapter) -{ - int fd = ((AdapterLoraT)padapter)->spi_lora_fd; - if (fd < 0){ - fd = open(LORA_SPI_NAME,O_RDWR); - } - - if(fd < 0){ - printf("LoRa check failed!\n!"); - return ERROR; - } else { - ((AdapterLoraT)padapter)->spi_lora_fd = fd; - spi_lora_fd_intern = fd; - - memset(&g_client_status_info, 0, sizeof(g_client_status_info)); - g_client_status_info.status = LORA_CLIENT_IDLE; - pkg_id_c2g = 1; - _role_type = ROLE_TYPE_NONE; - printf("LoRa check ok!\nNote: The length of the message that can be sent in a single time is 120 characters\n"); - } - - return 0; -} - -/** - * @description: close lora adapter - * @param padapter - lora adapter pointer - */ -void LoraAdapterCose(struct Adapter *padapter) -{ - if (((AdapterLoraT)padapter)->spi_lora_fd < 0){ - printf("LoRa device not found! close failed!\n"); - } else { - // Disconnect - int count = 10; - g_client_status_info.status = LORA_CLIENT_WAITTING_FOR_DISCONNECTED; - need_send_data = true; - while (count > 0 && g_client_status_info.status == LORA_CLIENT_WAITTING_FOR_DISCONNECTED) - { - printf("lora client waitting for rsp, count(%d)\n", count); - UserTaskDelay(1000); - count--; - } - if (g_client_status_info.status != LORA_CLIENT_DISCONNECTED){ - printf("lora send close pkg failed!\n"); - } - - gateway_listening = false; - client_work_thread_running = false; - - // Release thread - if (NONE != ServerTask){ - UserTaskDelete(ServerTask); - ServerTask = NONE; - } - - if (NONE != ClientTask){ - UserTaskDelete(ClientTask); - ClientTask = NONE; - } - - // Release the lock - if (pclient_send_buffer_mutex != NONE) { - UserMutexDelete(pclient_send_buffer_mutex); - pclient_send_buffer_mutex = NONE; - } - - // Driver - ((AdapterLoraT)padapter)->spi_lora_fd = NONE; - } -} - -bool lora_channel_avtive() -{ - return Radio->ChannelEmpty() == 0; -} - -int LoraAdapterSend(struct Adapter *padapter, const char *data, int len, bool block, int time_out, int delay, void *p) -{ - if (len > 120){ - printf("ERROR:The message is too long!\n"); - return ERROR; - } - - if (((AdapterLoraT)padapter)->spi_lora_fd == NONE){ - printf("LoRa device not found!\n"); - return ERROR; - } else { - int time_counter = 0; - int time_sleep_gap = 500; - while (false == lora_channel_avtive()) { - - if (time_counter * time_sleep_gap > time_out) { - printf("LoRa_adapter_send failed! time_out!\n"); - return ERROR; - } - - - UserTaskDelay(time_sleep_gap); - time_counter++; - } - - char Msg[120] = {0}; - memcpy(Msg, data, len); - - Radio->SetTxPacket(Msg, len); - while (Radio->Process() != RF_TX_DONE) - ; - } -} - -int lora_send(const char *data, int len) -{ - if (len > 120) { - printf("ERROR:The message is too long!\n"); - return ERROR; - } - - int time_counter = 0; - int time_sleep_gap = 500; - int time_out = 10000; - while (false == lora_channel_avtive()) { - if (time_counter * time_sleep_gap > time_out) { - printf("LoRa send failed! 
time_out!\n"); - return ERROR; - } - - UserTaskDelay(time_sleep_gap); - time_counter++; - } - - char Msg[120] = {0}; - memcpy(Msg, data, len); - - Radio->SetTxPacket(Msg, len); - while (Radio->Process() != RF_TX_DONE) - ; -} - -int LoraAdapterSendc2g(adapter_t padapter, const char *data, int len, bool block, int time_out, int delay, send_success cb, void *param, void *p) -{ - if (_role_type == ROLE_TYPE_SLAVE && g_client_status_info.status == LORA_CLIENT_CONNECTED) { - LoraHeader header; - memset(&header, 0, sizeof(header)); - header.op_code = LORA_C2G_DATA_REQ; - header.length = sizeof(LoraHeader) + sizeof(LoraProtoC2GDataReq) + len; - - if (header.length > 120) { - printf("Send failed, LoRa send max length is 120.\n"); - return ERROR; - } - - if (client_send_buffer.has_data == true){ - printf("Send failed, last pakage is resending\n"); - return ERROR; - } - - LoraProtoC2GDataReq req; - req.user_id = g_client_status_info.user_id; - req.pkg_id = pkg_id_c2g; - req.data_len = len; - req.crc = 0xFFFF; - - memcpy(send_buffer, &header, sizeof(header)); - memcpy(send_buffer + sizeof(header), &req, sizeof(req)); - memcpy(send_buffer + sizeof(header) + sizeof(req), data, len); - - // Write data to buffer (lock) - UserMutexObtain(pclient_send_buffer_mutex, WAITING_FOREVER); - client_send_buffer.has_data = true; - client_send_buffer.pkg_id = pkg_id_c2g; - client_send_buffer.crc = 0xFFFF; - memcpy(client_send_buffer.data, send_buffer, header.length); - client_send_buffer.data_len = header.length; - client_send_buffer.callback = cb; - client_send_buffer.param = param; - - UserMutexAbandon(pclient_send_buffer_mutex); - // printf("copy to send buffer. len = %d\n", header.length); - - // Switch worker thread state - need_send_data = true; - return ERROR; - } else { - printf("LoRa client is unconnected! can not send data!\n"); - } -} - -OnlineUser *find_user_by_name(char *name) -{ - DoubleLinklistType *pLink; - DOUBLE_LINKLIST_FOR_EACH(pLink, &online_user_head) - { - OnlineUser *pUer =CONTAINER_OF(pLink, OnlineUser, link); - if (strcmp(pUer->user_name, name) == 0) { - return pUer; - } - } - return NONE; -} - -OnlineUser *find_user_by_id(int id) -{ - DoubleLinklistType *pLink; - DOUBLE_LINKLIST_FOR_EACH(pLink, &online_user_head) - { - OnlineUser *pUer =CONTAINER_OF(pLink, OnlineUser, link); - if (pUer->user_id == id) { - return pUer; - } - } - return NONE; -} - -int insert_connected_clients(char *name, int user_id) -{ - OnlineUser *pUser = malloc(sizeof(OnlineUser)); - if (NONE == pUser) - return ERROR; - - pUser->user_id = user_id; - strcpy(pUser->user_name, name); - DoubleLinkListInsertNodeAfter(&online_user_head, &pUser->link); - - return EOK; -} - - - - -void CheckSendBuffer() -{ - UserMutexObtain(pclient_send_buffer_mutex, WAITING_FOREVER); - if (client_send_buffer.has_data == true) - { - //Sending or packet loss retransmission - lora_send(client_send_buffer.data, client_send_buffer.data_len); - // printf("client check and send. len = %d\n", client_send_buffer.data_len); - } - UserMutexAbandon(pclient_send_buffer_mutex); -} - -int LoraAdapterReceive(adapter_t padapter, char *rev_buffer, int buffer_len, int time_out, bool block, void *p) -{ - uint16 BufferSize = buffer_len; - - memset(rev_buffer, 0, buffer_len); - - if (((AdapterLoraT)padapter)->spi_lora_fd == NONE) - { - printf("LoRa device not found!\n"); - } - else - { - Radio->StartRx(); - printf("Ready!\n"); - - while (Radio->Process() != RF_RX_DONE) - { - // if (time_out < 0) - // { - // printf("LoRa device receive failed! 
Timeout!\n"); - // return ERROR; - // } - // UserTaskDelay(SLEEP_MS); - // time_out -= SLEEP_MS; - } - - Radio->GetRxPacket(rev_buffer, (uint16 *)&BufferSize); - if (BufferSize == buffer_len) - { - printf("rev_buffer is too small!\n"); - } - - printf("RX : %s\n", rev_buffer); - return BufferSize; - } -} - -int LoraGatewayProcess(adapter_t padapter, char *pkg, int rec_len) -{ - struct AdapterLora* lora_adapter = (struct AdapterLora*)padapter; - - if (rec_len > 0) - { - LoraHeader header; - memset(&header, 0, sizeof(header)); - char *start = (char *)pkg; - LoraHeader *pheader = (LoraHeader *)start; - - char *req = start + sizeof(LoraHeader); - - if (pheader->op_code < LORA_OPCODE_START || pheader->op_code > LORA_OPCODE_END) - { - printf("Illegal opcode, discard data!\n"); - return ERROR; - } - - if (rec_len != pheader->length) - { - printf("pkg is not complete!, discard data!\n"); - return ERROR; - } - - printf("gateway receive pkg opcode(%4x)\n", pheader->op_code); - switch (pheader->op_code) - { - case LORA_JOIN_REQ: // There is a client request to join the network segment - if (strcmp(((LoraProtoJoinReq *)req)->net_id, gateway_net_id) == 0) // The request is really this network segment - { - LoraProtoJoinReq *req = (LoraProtoJoinReq *)(start + sizeof(LoraHeader)); - - header.op_code = LORA_JOIN_RSP; - header.length = sizeof(LoraHeader) + sizeof(LoraProtoJoinRsp); - - LoraProtoJoinRsp rsp; - memset(&rsp, 0, sizeof(rsp)); - rsp.error_code = 0; - rsp.signal_strength = 0; // signal intensity - - printf(" 11aa strlen(lora_adapter->name) = %d\n", strlen(lora_adapter->name)); - printf(" 11aa lora_adapter->name = %s\n", lora_adapter->name); - - strcpy(rsp.gateway_name, lora_adapter->name); - strcpy(rsp.net_id, gateway_net_id); - - memcpy(send_buffer, &header, sizeof(header)); - memcpy(send_buffer + sizeof(header), &rsp, sizeof(rsp)); - - printf("LoraProtoJoinRsp rsp.gateway_name = %s rsp.net_id = %s\n", rsp.gateway_name, rsp.net_id); - lora_send(send_buffer, header.length); - printf("gateway send pkg opcode(%4x)\n", header.op_code); - } - break; - case LORA_HANDSHAKE_REQ: - if (strcmp(((LoraProtoHandshakeReq *)req)->gateway_name, lora_adapter->name) == 0) //The other party really wants to connect themselves - { - // Construction reply, connection confirmation - LoraProtoHandshakeReq *req = (LoraProtoHandshakeReq *)(start + sizeof(LoraHeader)); - int user_id = 0; - // If it can be found, it will prove that it is connected. 
It may be packet loss, so normal retransmission is good - OnlineUser *pUser = find_user_by_name(req->client_name); - if (NONE == pUser) - { - // New virtual connection - user_id = rand() % UINT32_SIZE_MAX; - if (ERROR == insert_connected_clients(req->client_name, user_id)) - { - return ERROR; - } - } - else - { - // Packet loss - user_id = pUser->user_id; - } - - header.op_code = LORA_HANDSHAKE_RSP; - header.length = sizeof(LoraHeader) + sizeof(LoraProtoHandshakeRsp); - - LoraProtoHandshakeRsp rsp; - memset(&rsp, 0, sizeof(rsp)); - strcpy(rsp.client_name, req->client_name); - strcpy(rsp.gateway_name, req->gateway_name); - rsp.user_id = user_id; - rsp.error_code = 0; - - memcpy(send_buffer, &header, sizeof(header)); - memcpy(send_buffer + sizeof(header), &rsp, sizeof(rsp)); - - printf("LoraProtoHandshakeRsp, rsp.client_name = %s, rsp.gateway_name = %s\n", rsp.client_name, rsp.gateway_name); - lora_send(send_buffer, header.length); - printf("gateway send pkg opcode(%4x)\n", header.op_code); - } - break; - case LORA_C2G_DATA_REQ: - { - OnlineUser *pUser = find_user_by_id(((LoraProtoC2GDataReq *)req)->user_id); - if (pUser) //What the other party wants to send is really himself - { - LoraProtoC2GDataReq *req = (LoraProtoC2GDataReq *)(start + sizeof(LoraHeader)); - - char *data = start + sizeof(LoraHeader) + sizeof(LoraProtoC2GDataReq); - // printf("receive data from client(%s), content(%s)", pUser->user_name, data); - printf(" receive data from \033[0;31m client(%s)\033[0m \n, content(%s). ", pUser->user_name, data); - // Logic layer to deal with - - // CRC calculation for data and reply to client - header.op_code = LORA_C2G_DATA_RSP; - header.length = sizeof(LoraHeader) + sizeof(LoraProtoC2GDataRsp); - - LoraProtoC2GDataRsp rsp; - memset(&rsp, 0, sizeof(rsp)); - rsp.user_id = req->user_id; - rsp.crc = 0xFFFF; - rsp.data_len = rec_len; - rsp.ack_id = req->pkg_id; - - memcpy(send_buffer, &header, sizeof(header)); - memcpy(send_buffer + sizeof(header), &rsp, sizeof(rsp)); - - lora_send(send_buffer, header.length); - printf("gateway send pkg opcode(%4x), ack_id(%d)\n", header.op_code, rsp.ack_id); - } - else - { - printf("user unconnected, discard data.\n"); - return ERROR; - } - - break; - } - - case LORA_CLOSE_REQ: - { - LoraProtoCloseReq *req = (LoraProtoCloseReq *)(start + sizeof(LoraHeader)); - - //log - DoubleLinklistType *pNode; - printf("******** connected users *********\n"); - DOUBLE_LINKLIST_FOR_EACH(pNode, &online_user_head) - { - OnlineUser *pUser =CONTAINER_OF(pNode, OnlineUser, link); - printf("pUser->user_name %s\n", pUser->user_name); - printf("pUser->user_id %d\n", pUser->user_id); - } - printf("*********************************\n"); - - printf("req->user_id = %d\n", req->user_id); - - OnlineUser *pUser = find_user_by_id(req->user_id); - if (pUser) - { - DoubleLinkListRmNode(&pUser->link); - - header.op_code = LORA_CLOSE_RSP; - header.length = sizeof(LoraHeader) + sizeof(LoraProtoCloseRsp); - LoraProtoCloseRsp rsp; - rsp.error_code = 0; - rsp.user_id = req->user_id; - memcpy(send_buffer, &header, sizeof(header)); - memcpy(send_buffer + sizeof(header), &rsp, sizeof(rsp)); - - lora_send(send_buffer, header.length); - } - else - { - printf("find user failed!\n"); - } - } - - default: - break; - } - } -} - -// Gateway receives requests from clients in a loop -static void LoraGateWayListen(void *parameter) -{ - // adapter_t padapter = (adapter_t)malloc(sizeof(struct Adapter)); - adapter_t padapter = parameter; - - while (gateway_listening) - { - printf("444x(0x%x), %d, %d \n", 
((AdapterLoraT)padapter)->spi_lora_fd, sizeof(rec_buffer), BUFFER_LEN_MAX); - - int rec_len = LoraAdapterReceive(padapter, rec_buffer, BUFFER_LEN_MAX, 10000, true, NULL); - UserTaskDelay(50); //Afraid the other party hasn't entered reception mode yet - LoraGatewayProcess(padapter, rec_buffer, rec_len); - } -} - -// All client packets received are processed here -int LoraClientProcess(adapter_t padapter, char *pkg, int pkg_len) -{ - struct AdapterLora* lora_adapter = (struct AdapterLora*)padapter; - - LoraHeader *pheader = (LoraHeader *)pkg; - - LoraHeader header; - memset(&header, 0, sizeof(header)); - - char *data_start = pkg + sizeof(LoraHeader); - - switch (pheader->op_code) - { - case LORA_JOIN_RSP: - if (g_client_status_info.status == LORA_CLIENT_LOOKING4GATEWAY) - { - LoraProtoJoinRsp *data = (LoraProtoJoinRsp *)data_start; - - if (strcmp(data->net_id, client_net_id) != 0) - break; - - strcpy(g_client_status_info.gateway_name, data->gateway_name); - strcpy(g_client_status_info.client_name, lora_adapter->name); - g_client_status_info.status = LORA_CLIENT_CONNECTING2GATEWAY; - break; - } - case LORA_HANDSHAKE_RSP: - if (strcmp(((LoraProtoHandshakeRsp *)data_start)->client_name, lora_adapter->name) == 0) // Confirm that it was sent to yourself - { - LoraProtoHandshakeRsp *data = (LoraProtoHandshakeRsp *)data_start; - printf("get LoraProtoHandshakeRsp data->client_name = %s, data->gateway_name = %s\n", data->client_name, data->gateway_name); - // Confirm the connection and record the status information - g_client_status_info.status = LORA_CLIENT_CONNECTED; - g_client_status_info.user_id = data->user_id; - - strcpy(g_client_status_info.gateway_name, data->gateway_name); - - printf("client is connected to gateway(%s)\n", g_client_status_info.gateway_name); - } - break; - - case LORA_C2G_DATA_RSP: - if (((LoraProtoC2GDataRsp *)data_start)->user_id == g_client_status_info.user_id) // Confirm that it was sent to yourself - { - LoraProtoC2GDataRsp *data = (LoraProtoC2GDataRsp *)data_start; - - UserMutexObtain(pclient_send_buffer_mutex, WAITING_FOREVER); - if (data->ack_id == client_send_buffer.pkg_id) - { - if (data->crc == client_send_buffer.crc) - { - // Send successfully, execute external callback - if (client_send_buffer.callback) - { - client_send_buffer.callback(client_send_buffer.param); - } - - //Reset buffer - memset(&client_send_buffer, 0, sizeof(client_send_buffer)); - client_send_buffer.has_data = false; - // printf("pkg_id(%d) get ack. send success. 
send buffer clear.\n", data->ack_id); - - pkg_id_c2g++; - } - else - { - // Then do nothing and wait for the retransmission - printf("client send failed.crc check failed.\n"); - } - } - UserMutexAbandon(pclient_send_buffer_mutex); - } - break; - - case LORA_CLOSE_RSP: - { - LoraProtoCloseRsp *rsp = (LoraProtoCloseRsp *)data_start; - printf("case LORA_CLOSE_RSP rsp->user_id(%d)\n", rsp->user_id); - printf("case LORA_CLOSE_RSP g_client_status_info.user_id(%d)\n", g_client_status_info.user_id); - - if (rsp->user_id == g_client_status_info.user_id) - { - g_client_status_info.status = LORA_CLIENT_DISCONNECTED; - printf("client close success.\n"); - } - } - break; - default: - break; - } -} - -// The client requests the gateway to disconnect -int SendCloseReq() -{ - LoraHeader header; - memset(&header, 0, sizeof(header)); - header.op_code = LORA_CLOSE_REQ; - header.length = sizeof(LoraHeader) + sizeof(LoraProtoCloseReq); - - LoraProtoCloseReq req; - req.user_id = g_client_status_info.user_id; - memcpy(send_buffer, &header, sizeof(header)); - memcpy(send_buffer + sizeof(header), &req, sizeof(req)); - printf("client send close req"); - lora_send(send_buffer, header.length); -} - -// The client broadcasts the name of the network segment that it wants to join -int SendJoinReq(char *net_id) -{ - LoraHeader header; - memset(&header, 0, sizeof(header)); - header.op_code = LORA_JOIN_REQ; - header.length = sizeof(LoraHeader) + sizeof(LoraProtoJoinReq); - - LoraProtoJoinReq req; - memcpy(req.net_id, net_id, strlen(net_id) + 1); - memcpy(send_buffer, &header, sizeof(header)); - memcpy(send_buffer + sizeof(header), &req, sizeof(req)); - - lora_send(send_buffer, header.length); -} - -// Client requests connection from gateway -int SendHandShakeReq(char *client_name, char *gateway_name) -{ - LoraHeader header; - header.op_code = LORA_HANDSHAKE_REQ; - header.length = sizeof(LoraHeader) + sizeof(LoraProtoHandshakeReq); - - LoraProtoHandshakeReq req; - memset(&req, 0, sizeof(req)); - strcpy(req.client_name, client_name); - strcpy(req.gateway_name, gateway_name); - - memcpy(send_buffer, &header, sizeof(header)); - memcpy(send_buffer + sizeof(header), &req, sizeof(req)); - - printf("LoraProtoHandshakeReq, req.client_name = %s req.gateway_name = %s\n", req.client_name, req.gateway_name); - lora_send(send_buffer, header.length); -} - -void work_thread_process(adapter_t padapter) -{ - if (g_work_thread_status == WORK_THREAD_RX) - { - // printf("client start receiving \n"); - uint16 len; - int counter = 0; - Radio->StartRx(); - while (Radio->Process() != RF_RX_DONE) - { - // Receive external signal, check buffer - if (need_send_data) - { - g_work_thread_status = WORK_THREAD_TX; - need_send_data = false; - return; - } - - //Jump out of the monitoring cycle regularly to see if there is anything that needs to be retransmitted - if (counter > 100000) - { - if (g_client_status_info.status == LORA_CLIENT_CONNECTED) - { - //printf("check send buffer.\n"); - } - if (g_client_status_info.status >= LORA_CLIENT_LOOKING4GATEWAY && - g_client_status_info.status <= LORA_CLIENT_CONNECTING2GATEWAY) - { - printf("retry to handshake.\n"); - } - g_work_thread_status = WORK_THREAD_TX; - // printf("client end receiving.\n"); - return; - } - counter++; - } - - enum LoraClientStatus old = g_client_status_info.status; - Radio->GetRxPacket(rec_buffer, (uint16 *)&len); - LoraClientProcess(padapter, rec_buffer, len); - - //If the client state machine changes, there is something to send to the other party, and it will be switched to the sending 
mode immediately - if (old != g_client_status_info.status) - { - g_work_thread_status = WORK_THREAD_TX; - UserTaskDelay(50); //Afraid the other party hasn't entered reception mode yet - } - } - else if (g_work_thread_status == WORK_THREAD_TX) - { - // Temporarily designed not to be interrupted - switch (g_client_status_info.status) - { - case LORA_CLIENT_LOOKING4GATEWAY: - SendJoinReq(client_net_id); - break; - case LORA_CLIENT_CONNECTING2GATEWAY: - SendHandShakeReq(g_client_status_info.client_name, g_client_status_info.gateway_name); - break; - case LORA_CLIENT_CONNECTED: - CheckSendBuffer(); - break; - case LORA_CLIENT_WAITTING_FOR_DISCONNECTED: - SendCloseReq(g_client_status_info.user_id); - break; - default: - break; - } - g_work_thread_status = WORK_THREAD_RX; - } -} - -void static work_thread_func(void *parameter) -{ - adapter_t padapter = (adapter_t)parameter; - while (client_work_thread_running) - { - work_thread_process(padapter); - } -} -int LoraAdapterJoin(adapter_t padapter, int net_role_type, char *net_id) -{ - UtaskType lora_utask_master; - UtaskType lora_utask_slave; - do - { - _role_type = net_role_type; - x_err_t err; - if (_role_type == ROLE_TYPE_MASTER) - { - strcpy(gateway_net_id, net_id); - - //Single child thread gateway loop monitoring req - gateway_listening = true; - printf("GateWayListen thread create..."); - - strncpy(lora_utask_master.name,"GateWayListen",strlen("GateWayListen")); - lora_utask_master.func_entry = LoraGateWayListen; - lora_utask_master.func_param = padapter; - lora_utask_master.prio = 10; - lora_utask_master.stack_size = 2048; - - ServerTask = UserTaskCreate(lora_utask_master); - err = UserTaskStartup(ServerTask); - if (err != EOK) - { - break; - } - } - else if (_role_type == ROLE_TYPE_SLAVE) - { - strcpy(client_net_id, net_id); - - // Create lock - if (pclient_send_buffer_mutex < 0) - { - pclient_send_buffer_mutex = UserMutexCreate(); - } - - // Update state machine - g_client_status_info.status = LORA_CLIENT_LOOKING4GATEWAY; - - // Start single child thread client loop to listen for RSP - client_send_buffer.has_data = false; - memset(&client_send_buffer, 0, sizeof(client_send_buffer)); - client_work_thread_running = true; - g_work_thread_status = WORK_THREAD_TX; - - strncpy(lora_utask_slave.name,"ClientWorkThread",strlen("ClientWorkThread")); - lora_utask_slave.func_entry = work_thread_func; - lora_utask_slave.func_param = padapter; - lora_utask_slave.prio = 10; - lora_utask_slave.stack_size = 2048; - - ClientTask = UserTaskCreate(lora_utask_slave); - - err = UserTaskStartup(ClientTask); - if (err != EOK) - { - break; - } - // Block detection for a period of time - int counter = 100; - while (counter > 0) - { - if (g_client_status_info.status == LORA_CLIENT_CONNECTED) - { - break; // Successful connection - } - else - { - UserTaskDelay(300); - } - counter--; - } - return (counter > 0) ? 
0 : ERROR; - } - return EOK; - } while (false); - - // Exception handling, releasing resources - LoraAdapterCose(padapter); - return ERROR; -} diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/nbiot/Makefile b/Ubiquitous/XiUOS/framework/connection/Adapter/nbiot/Makefile deleted file mode 100644 index 36a6f4c5..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/nbiot/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -SRC_DIR := bc28 - -include $(KERNEL_ROOT)/compiler.mk \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/nbiot/bc28/Makefile b/Ubiquitous/XiUOS/framework/connection/Adapter/nbiot/bc28/Makefile deleted file mode 100644 index 88ac0d61..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/nbiot/bc28/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -SRC_FILES := xs_adapter_at_nbiot.c xs_adapter_at_nbiot_register.c - -include $(KERNEL_ROOT)/compiler.mk \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/nbiot/bc28/xs_adapter_at_nbiot.c b/Ubiquitous/XiUOS/framework/connection/Adapter/nbiot/bc28/xs_adapter_at_nbiot.c deleted file mode 100644 index 6fde6322..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/nbiot/bc28/xs_adapter_at_nbiot.c +++ /dev/null @@ -1,339 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. 
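The LoRa client logic above is driven by a small state machine: LoraAdapterJoin() starts the work thread in LORA_CLIENT_LOOKING4GATEWAY, and each gateway reply handled in LoraClientProcess() advances g_client_status_info.status until the close handshake finishes. A condensed, standalone view of that intended progression (the name and user_id guards and the data-ack path are left out; enum values are copied from xs_adapterLora.h so the sketch compiles on its own):

    /* Sketch: the happy-path status transitions driven by gateway replies. */
    #include <stdio.h>

    enum LoraClientStatus {
        LORA_CLIENT_IDLE = 0,
        LORA_CLIENT_LOOKING4GATEWAY,
        LORA_CLIENT_CONNECTING2GATEWAY,
        LORA_CLIENT_CONNECTED,
        LORA_CLIENT_WAITTING_FOR_DISCONNECTED,
        LORA_CLIENT_DISCONNECTED,
    };

    enum LoraOpcode {
        LORA_JOIN_RSP      = 0xFFF2,
        LORA_HANDSHAKE_RSP = 0xFFF4,
        LORA_C2G_DATA_RSP  = 0xFFF6,
        LORA_CLOSE_RSP     = 0xFFF8,
    };

    static enum LoraClientStatus NextStatus(enum LoraClientStatus cur, enum LoraOpcode rsp)
    {
        switch (rsp) {
        case LORA_JOIN_RSP:       /* gateway answered the broadcast join request  */
            return (cur == LORA_CLIENT_LOOKING4GATEWAY) ? LORA_CLIENT_CONNECTING2GATEWAY : cur;
        case LORA_HANDSHAKE_RSP:  /* gateway allocated a user_id for this client  */
            return (cur == LORA_CLIENT_CONNECTING2GATEWAY) ? LORA_CLIENT_CONNECTED : cur;
        case LORA_C2G_DATA_RSP:   /* data ack: status unchanged, send buffer freed */
            return cur;
        case LORA_CLOSE_RSP:      /* gateway confirmed the close request          */
            return (cur == LORA_CLIENT_WAITTING_FOR_DISCONNECTED) ? LORA_CLIENT_DISCONNECTED : cur;
        default:
            return cur;
        }
    }

    int main(void)
    {
        enum LoraClientStatus s = LORA_CLIENT_LOOKING4GATEWAY;
        s = NextStatus(s, LORA_JOIN_RSP);
        s = NextStatus(s, LORA_HANDSHAKE_RSP);
        printf("status after join + handshake: %d (expect %d)\n", s, LORA_CLIENT_CONNECTED);
        return 0;
    }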
-*/ - -/** - * @file xs_adapter_at_nbiot.c - * @brief BC28 NBIoT driver base connection framework - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#include -#include -#include -#include - -#define NBIOT_DEVICE_NAME "/dev/usart2_dev2" - -#define SOCKET_TYPE_DGRAM (0) -#define SOCKET_TYPE_STREAM (1) - -#define SOCKET_PROTOCOL_TCP (6) -#define SOCKET_PROTOCOL_UDP (17) - -#define NET_TYPE_AF_INET (0) -#define NET_TYPE_AF_INET6 (1) - -/** - * @description: Open NBIoT device - * @param padapter - NBIoT adapter - * @return success: EOK, failure: -ERROR - */ -int NbiotOpen(struct Adapter *padapter) -{ - char *agent_name = "urat2_client"; - const char *device_name = NBIOT_DEVICE_NAME; - - struct SerialCfgParam cfg; - cfg.data_cfg.serial_baud_rate = BAUD_RATE_9600; - cfg.data_cfg.serial_bit_order = 0; - cfg.data_cfg.serial_buffer_size = SERIAL_RB_BUFSZ; - cfg.data_cfg.serial_data_bits = DATA_BITS_8; - cfg.data_cfg.serial_invert_mode = 0; - cfg.data_cfg.serial_parity_mode = PARITY_NONE; - cfg.data_cfg.serial_stop_bits = STOP_BITS_1; - - if (InitATAgent(agent_name, device_name, 512, &cfg)) { - printf("NBIoT open failed!\n"); - return -ERROR; - } - - ATAgentType at_agent = GetATAgent(agent_name); - if (NULL == at_agent) { - printf("Get AT agent failed!\n"); - return -ERROR; - } - - UserTaskDelay(3000); - struct AdapterAT *nbiot = (struct AdapterAT *)padapter; - nbiot->agent = at_agent; - - return EOK; -} - -/** - * @description: NBIoT device create a socket connection - * @param adapterAT - NBIoT adapter AT - * @param socket_fd - socket file description - * @param type - socket type - * @param af_type - IPv4 or IPv6 - * @return success: EOK, failure: -ERROR - */ -int NBIoTSocketCreate(struct AdapterAT *adapterAT, uint8_t socket_fd, uint8_t type, uint8_t af_type ) -{ - int32 result; - - ATReplyType reply = CreateATReply(64); - if (NULL == reply) { - printf("at create failed ! 
\n"); - result = -ERROR; - goto __exit; - } - - if ( af_type == NET_TYPE_AF_INET6) { - printf("IPv6 not surport !\n"); - return -1; - } - - char *str_af_type = "AF_INET"; - char *str_type; - char str_fd[3] = {0}; - char *str_protocol ; - char at_cmd[64] = {0}; - char *linsten_port = "0"; - struct Socket socket = {0}; - socket.status = SOCKET_STATUS_INIT; - socket.af_type = NET_TYPE_AF_INET; - - if (type == SOCKET_TYPE_STREAM) { //tcp = AT+NSOCR=STREAM,6,0,1,AF_INET - socket.type = SOCKET_TYPE_STREAM; - socket.protocal = SOCKET_PROTOCOL_TCP; - str_type = "STREAM"; - char *str_protocol = "6"; - if (socket_fd > 0 && socket_fd < 8) { - str_fd[0] = socket_fd + '0'; - } else - str_fd[0] = '0'; - - memcpy(at_cmd, "AT+NSOCR=", 9); - strcat(at_cmd, str_type); - strcat(at_cmd, ","); - strcat(at_cmd, str_protocol); - strcat(at_cmd, ","); - strcat(at_cmd, linsten_port); - strcat(at_cmd, ","); - strcat(at_cmd, str_fd); - strcat(at_cmd, ","); - strcat(at_cmd, str_af_type); - strcat(at_cmd, "\n"); - - }else if ( type == SOCKET_TYPE_DGRAM ){ //udp - socket.type = SOCKET_TYPE_DGRAM; - socket.protocal = SOCKET_PROTOCOL_UDP; - str_type = "DGRAM"; - char *str_protocol = "17"; - if (socket_fd > 0 && socket_fd < 8){ - str_fd[0] = socket_fd + '0'; - }else - str_fd[0] = '0'; - - memcpy(at_cmd, "AT+NSOCR=", 9); - strcat(at_cmd, str_type); - strcat(at_cmd, ","); - strcat(at_cmd, str_protocol); - strcat(at_cmd, ","); - strcat(at_cmd, linsten_port); - strcat(at_cmd, ","); - strcat(at_cmd, str_fd); - strcat(at_cmd, ","); - strcat(at_cmd, str_af_type); - strcat(at_cmd, "\n"); - - }else{ - printf("error socket type \n"); - return -1 ; - } - - printf("cmd : %s\n", at_cmd); - ATOrderSend(adapterAT->agent, REPLY_TIME_OUT, reply, at_cmd); - UserTaskDelay(3000); - printf("bak : "); - for(int i = 0; i < strlen(reply->reply_buffer); i++) - printf(" 0x%02x", reply->reply_buffer[i]); - printf("\n"); - - struct Socket (*socketlist)[MAX_SOCKET_NUM] = &adapterAT->socket; - memset(socketlist[socket_fd], 0, sizeof(struct Socket)); - memcpy(socketlist[socket_fd], &socket , sizeof(struct Socket)); - -__exit: - if (reply) { - DeleteATReply(reply); - } - - return result; -} - -/** - * @description: NBIoT device create a socket connection - * @param adapterAT - NBIoT adapter AT - * @param socket_fd - socket file description - * @param dst_ip - ip address - * @param dst_port - ip port - * @param is_client - whether it is a client - * @return success: EOK, failure: -ERROR - */ -int NBIoTSocketConnect(struct AdapterAT *adapterAT , uint8_t socket_fd , struct AddressIpv4 dst_ip , uint16_t dst_port, uint8 is_client) -{ - int result; - - ATReplyType reply = CreateATReply(64); - if (NULL == reply) { - printf("at create failed ! 
\n"); - result = -ERROR; - goto __exit; - } - - if (socket_fd < 1 || socket_fd > 8) { - printf("socket fd error \n"); - return -1 ; - } - struct Socket (*socketlist)[SOCKET_MAX] = &adapterAT->socket; - - if (socketlist[socket_fd]->status != SOCKET_STATUS_INIT || socketlist[socket_fd]->type != SOCKET_TYPE_STREAM) { - printf("socket type error \n"); - } - - char at_cmd[64] = {0}; - char str_fd[2] = {0}; - char str_port[6] = {0}; - - str_fd[0] = socket_fd + '0'; - char *str_ip = IpTstr(dst_ip.ipv4) ; - sprintf(str_port, "%u", dst_port); - - memcpy(at_cmd, "AT+NSOCO=", 9); - strcat(at_cmd, str_fd); - strcat(at_cmd, ","); - strcat(at_cmd, str_ip); - strcat(at_cmd, ","); - strcat(at_cmd, str_port); - strcat(at_cmd, "\n"); - - printf("cmd : %s\n", at_cmd); - ATOrderSend(adapterAT->agent, REPLY_TIME_OUT, reply, at_cmd); - UserTaskDelay(300); - - socketlist[socket_fd]->dst_ip = dst_ip; - socketlist[socket_fd]->dst_port = dst_port; - socketlist[socket_fd]->status = SOCKET_STATUS_CONNECT; - -__exit: - if (reply) { - DeleteATReply(reply); - } - - return result; -} - -/** - * @description: NBIoT device close a socket connection - * @param adapterAT - NBIoT adapter AT - * @param socket_fd - socket file description - * @return success: EOK, failure: -ERROR - */ -int NBIoTSocketClose(struct AdapterAT *adapterAT, uint8_t socket_fd ) -{ - int result; - - ATReplyType reply = CreateATReply(64); - if (NULL == reply) { - printf("at create failed ! \n"); - result = -ERROR; - goto __exit; - } - - if (socket_fd < 1 || socket_fd > 7) { - printf("socket fd error \n"); - return -1 ; - } - - struct Socket (*socketlist)[SOCKET_MAX] = &adapterAT->socket; - - char str_fd[2] = {0}; - char at_cmd[16] = {0}; - str_fd[0] = socket_fd + '0'; - - memcpy(at_cmd, "AT+NSOCL=", 9); - strcat(at_cmd, str_fd); - strcat(at_cmd, "\n"); - - printf("cmd : %s\n", at_cmd); - ATOrderSend(adapterAT->agent, REPLY_TIME_OUT, reply, at_cmd); - UserTaskDelay(300); - - memset(socketlist[socket_fd], 0, sizeof(struct Socket)); - -__exit: - if (reply) { - DeleteATReply(reply); - } - - return result; -} - -/** - * @description: NBIoT socket send a message - * @param padapter - NBIoT adapter - * @param data - send data buffer - * @param len - send data length - * @param block - block - * @param time_out - timeout - * @param delay - delay time - * @param cb - send callback function - * @param param - send callback function parameter - * @param reserved - reserved parameter - * @return success: EOK, failure: -ERROR - */ -int NbiotSend(struct Adapter *padapter, const char* data, int len, bool block, int time_out, int delay, send_success cb, void* param, void* reserved) -{ - uint32_t result; - - ATReplyType reply = CreateATReply(64); - if (NULL == reply) { - printf("at create failed ! 
\n"); - result = -ERROR; - goto __exit; - } - - struct AdapterAT *adapterAT = (struct AdapterAT *)padapter; - int socket_fd = *(int*)reserved; - - struct Socket (*socketlist)[SOCKET_MAX] = &adapterAT->socket; - - char at_cmd[64] = {0}; - char str_fd[2] = {0}; - char size[2] = {0}; - - str_fd[0] = socket_fd + '0'; - size[0] = len + '0'; - - memcpy(at_cmd, "AT+NSOSD=", 9); - strcat(at_cmd, str_fd); - strcat(at_cmd, ","); - strcat(at_cmd, size); - strcat(at_cmd, ","); - strcat(at_cmd, data); - strcat(at_cmd, "\n"); - - printf("cmd : %s\n", at_cmd); - ATOrderSend(adapterAT->agent, REPLY_TIME_OUT, reply, at_cmd); - UserTaskDelay(300); - -__exit: - if (reply) { - DeleteATReply(reply); - } - - return result; -} \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/nbiot/bc28/xs_adapter_at_nbiot_register.c b/Ubiquitous/XiUOS/framework/connection/Adapter/nbiot/bc28/xs_adapter_at_nbiot_register.c deleted file mode 100644 index 26164aa8..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/nbiot/bc28/xs_adapter_at_nbiot_register.c +++ /dev/null @@ -1,69 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ - -/** - * @file xs_adapter_at_nbiot_register.c - * @brief BC28 nbiot driver register to connection framework - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#include -#include - -const struct AdapterDone bc28_done = -{ - NONE, - NbiotOpen, - NONE, - NbiotSend, - NONE, - NONE, -}; - -const struct ATDone bc28_at_done = -{ - NONE, - NONE, - NONE, - NONE, - NONE, - NONE, - NONE, - NONE, - NBIoTSocketCreate, - NBIoTSocketConnect, - NBIoTSocketClose, -}; - -/** - * @description: Register nbiot device - * @return success: EOK, failure: ERROR - */ -int RegisterAdapterNBIoT(void) -{ - static struct AdapterNBIoT nbiot_adapter; - - struct AdapterAT *nbiot_at_adapter = (struct AdapterAT *)&nbiot_adapter; - struct Adapter *adapter = (struct Adapter *)&nbiot_adapter; - - nbiot_adapter.parent.atdone = bc28_at_done; - nbiot_adapter.parent.parent.done = bc28_done; - - nbiot_at_adapter->at_adapter_id = NBIOT_ADAPTER_ID; - - ATAdapterInit(); - ATAdapterRegister(nbiot_at_adapter); - - return 0; -} \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/src/Makefile b/Ubiquitous/XiUOS/framework/connection/Adapter/src/Makefile deleted file mode 100644 index 1d643230..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/src/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -SRC_FILES := xs_adapter_manager.c \ - xs_adapter_at_agent.c - -include $(KERNEL_ROOT)/compiler.mk diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/src/xs_adapter_at_agent.c b/Ubiquitous/XiUOS/framework/connection/Adapter/src/xs_adapter_at_agent.c deleted file mode 100644 index 2193537c..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/src/xs_adapter_at_agent.c +++ /dev/null @@ -1,485 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. 
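The BC28 routines above assemble each socket command with strcat chains; written out with snprintf, the strings they put on the UART are the following. This is only an illustration of the command layout produced by NBIoTSocketCreate, NBIoTSocketConnect, NbiotSend and NBIoTSocketClose; the fd, address, port and payload values are placeholders:

    /* Sketch: BC28 AT strings built by the driver above (illustrative values). */
    #include <stdio.h>

    int main(void)
    {
        char cmd[64];
        int fd = 1;

        /* NBIoTSocketCreate, TCP: AT+NSOCR=<type>,<protocol>,<listen port>,<fd>,<af> */
        snprintf(cmd, sizeof(cmd), "AT+NSOCR=STREAM,6,0,%d,AF_INET\n", fd);
        printf("%s", cmd);

        /* NBIoTSocketConnect: AT+NSOCO=<fd>,<remote ip>,<remote port> */
        snprintf(cmd, sizeof(cmd), "AT+NSOCO=%d,%s,%u\n", fd, "192.168.1.10", 12345u);
        printf("%s", cmd);

        /* NbiotSend: AT+NSOSD=<fd>,<length>,<data> */
        snprintf(cmd, sizeof(cmd), "AT+NSOSD=%d,%d,%s\n", fd, 5, "hello");
        printf("%s", cmd);

        /* NBIoTSocketClose: AT+NSOCL=<fd> */
        snprintf(cmd, sizeof(cmd), "AT+NSOCL=%d\n", fd);
        printf("%s", cmd);

        return 0;
    }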
-* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ - -/** - * @file xs_adapterAT_client.c - * @brief AT proxy, auto receive AT reply and transparency data - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - - -#include -#include -#include -#include -#include -#include -#include -#include - -#define AT_CMD_MAX_LEN 128 -#define AT_AGENT_MAX 2 -static char send_buf[AT_CMD_MAX_LEN]; -static uint32 last_cmd_len = 0; - -static struct ATAgent at_agent_table[AT_AGENT_MAX] = {0}; - -uint IpTint(char *ipstr){ - if (ipstr == NULL) - return 0; - - char *token; - uint i = 3, total = 0, cur; - - token = strtok(ipstr, "."); - - while (token != NULL){ - cur = atoi(token); - if (cur >= 0 && cur <= 255){ - total += cur * pow(256, i); - } - i--; - token = strtok(NULL, "."); - } - - return total; -} - -void SwapStr(char *str, int begin, int end) -{ - int i, j; - - for (i = begin, j = end; i <= j; i++, j--){ - if (str[i] != str[j]){ - str[i] = str[i] ^ str[j]; - str[j] = str[i] ^ str[j]; - str[i] = str[i] ^ str[j]; - } - } -} - -char *IpTstr(uint ipint) -{ - int LEN = 16; - char *new = (char *)malloc(LEN); - memset(new, '\0', LEN); - new[0] = '.'; - char token[4]; - int bt, ed, len, cur; - - while (ipint){ - cur = ipint % 256; - sprintf(token, "%d", cur); - strcat(new, token); - ipint /= 256; - if (ipint) - strcat(new, "."); - } - - len = strlen(new); - SwapStr(new, 0, len - 1); - - for (bt = ed = 0; ed < len;){ - while (ed < len && new[ed] != '.'){ - ed++; - } - SwapStr(new, bt, ed - 1); - ed += 1; - bt = ed; - } - - new[len - 1] = '\0'; - - return new; -} - -int ParseATReply(char *str, const char *format, ...) -{ - va_list params; - int counts = 0; - - va_start(params, format); - counts = vsscanf(str, format, params); - va_end(params); - - return counts; -} - -uint32 ATSprintf(int fd, const char *format, va_list params) -{ - last_cmd_len = vsnprintf(send_buf, sizeof(send_buf), format, params); - return write(fd, send_buf, last_cmd_len); -} - -int ATOrderSend(ATAgentType agent, uint32 timeout, ATReplyType reply, const char *cmd_expr, ...) 
-{ - if (agent == NULL){ - printf("ATAgent is null"); - return -ERROR; - } - - agent->receive_mode = AT_MODE; - - memset(agent->maintain_buffer, 0x00, agent->maintain_max); - agent->maintain_len = 0; - - memset(agent->entm_recv_buf, 0, ENTM_RECV_MAX); - agent->entm_recv_len = 0; - - va_list params; - uint32 cmd_size = 0; - uint32 result = EOK; - const char *cmd = NULL; - - UserMutexObtain(agent->lock, WAITING_FOREVER); - - agent->reply = reply; - - if(agent->reply != NULL){ - reply->reply_len = 0; - va_start(params, cmd_expr); - ATSprintf(agent->fd, cmd_expr, params); - va_end(params); - - if (UserSemaphoreObtain(agent->rsp_sem, timeout) != EOK){ - result = -ETIMEOUT; - goto __out; - } - }else{ - va_start(params, cmd_expr); - ATSprintf(agent->fd, cmd_expr, params); - va_end(params); - goto __out; - } - -__out: - agent->reply = NULL; - UserMutexAbandon(agent->lock); - agent->receive_mode = ENTM_MODE; - - return result; -} - -char *GetReplyText(ATReplyType reply) -{ - return reply->reply_buffer; -} - -int EntmSend(ATAgentType agent, const char *data, int len) -{ - char send_buf[128]; - memset(send_buf, 0, 128); - agent->receive_mode = ENTM_MODE; - - memcpy(send_buf, data, len); - memcpy(send_buf + len, "!@", 2); - - write(agent->fd, send_buf, len + 2); -} - -int EntmRecv(ATAgentType agent, char *rev_buffer, int buffer_len, int time_out) -{ - UserTaskDelay(1000); - - memset(agent->entm_recv_buf, 0, ENTM_RECV_MAX); - agent->entm_recv_len = 0; - - UserSemaphoreSetValue(agent->entm_rx_notice, 0); - - if (UserSemaphoreObtain(agent->entm_rx_notice, time_out)){ - return ERROR; - } - - if (buffer_len < agent->entm_recv_len){ - return ERROR; - } - - printf("EntmRecv once .\n"); - - agent->entm_recv_buf[agent->entm_recv_len - 2] = '\0'; - memcpy(rev_buffer, agent->entm_recv_buf, agent->entm_recv_len - 2); - - return EOK; -} - -static int GetCompleteATReply(ATAgentType agent) -{ - uint32 read_len = 0; - char ch = 0, last_ch = 0; - bool is_full = false; - - memset(agent->maintain_buffer, 0x00, agent->maintain_max); - agent->maintain_len = 0; - - while (1){ - read(agent->fd, &ch, 1); - - printf(" %c(0x%x)\n", ch, ch); - - if (agent->receive_mode == ENTM_MODE){ - if (agent->entm_recv_len < ENTM_RECV_MAX){ - agent->entm_recv_buf[agent->entm_recv_len++] = ch; - - if (last_ch == '!' && ch == '@'){ - UserSemaphoreAbandon(agent->entm_rx_notice); - } - - last_ch = ch; - } - else{ - printf("entm_recv_buf is_full ...\n"); - } - } - else if (agent->receive_mode == AT_MODE){ - if (read_len < agent->maintain_max){ - agent->maintain_buffer[read_len++] = ch; - agent->maintain_len = read_len; - }else{ - printf("maintain_len is_full ...\n"); - is_full = true; - } - - if ((ch == '\n' && last_ch == '\r')){ - if (is_full){ - printf("read line failed. 
The line data length is out of buffer size(%d)!", agent->maintain_max); - memset(agent->maintain_buffer, 0x00, agent->maintain_max); - agent->maintain_len = 0; - return -ERROR; - } - printf("GetCompleteATReply get n r ...\n"); - break; - } - last_ch = ch; - } - } - - return read_len; -} - -ATAgentType GetATAgent(const char *agent_name) -{ - struct ATAgent* result = NULL; - for (int i = 0; i < AT_AGENT_MAX; i++){ - if (strcmp(at_agent_table[i].agent_name, agent_name) == 0){ - result = &at_agent_table[i]; - } - } - - return result; -} - - -static int DeleteATAgent(ATAgentType agent) -{ - if (agent->lock){ - UserMutexDelete(agent->lock); - } - - if (agent->entm_rx_notice){ - UserSemaphoreDelete(agent->entm_rx_notice); - } - - if (agent->fd > 0){ - close(agent->fd); - } - - if (agent->rsp_sem){ - UserSemaphoreDelete(agent->rsp_sem); - } - - if (agent->maintain_buffer){ - free(agent->maintain_buffer); - } - - memset(agent, 0x00, sizeof(struct ATAgent)); -} - -static void ATAgentReceiveProcess(void *param) -{ - ATAgentType agent = (ATAgentType)param; - const struct at_urc *urc; - - while (1){ - if (GetCompleteATReply(agent) > 0){ - if (agent->reply != NULL){ - ATReplyType reply = agent->reply; - - agent->maintain_buffer[agent->maintain_len - 1] = '\0'; - - if (agent->maintain_len < reply->reply_max_len){ - memcpy(reply->reply_buffer, agent->maintain_buffer, agent->maintain_len); - - reply->reply_len = agent->maintain_len; - } - else{ - printf("out of memory (%d)!", reply->reply_max_len); - } - - agent->reply = NULL; - UserSemaphoreAbandon(agent->rsp_sem); - } - } - } -} - -static int ATAgentInit(ATAgentType agent) -{ - int result = EOK; - UtaskType at_utask; - do - { - agent->maintain_len = 0; - agent->maintain_buffer = (char *)malloc(agent->maintain_max); - - if (agent->maintain_buffer == NONE){ - break; - } - - agent->entm_rx_notice = UserSemaphoreCreate(0); - if (agent->entm_rx_notice == 0){ - break; - } - - agent->rsp_sem = UserSemaphoreCreate(0); - if (agent->rsp_sem == 0){ - break; - } - - agent->lock = UserMutexCreate(); - if (agent->lock == 0){ - break; - } - - agent->receive_mode = ENTM_MODE; - - strncpy(at_utask.name, "recv_task", strlen("recv_task")); - at_utask.func_entry = ATAgentReceiveProcess; - at_utask.func_param = agent; - at_utask.stack_size = 1024; - at_utask.prio = 18; - - agent->at_handler = UserTaskCreate(at_utask); - - if (agent->at_handler == 0) { - break; - } - - result = EOK; - return result; - } while (1); - - DeleteATAgent(agent); - result = -ERROR; - return result; -} - - - -static int SerialBusCheck(struct ATAgent *agent, const char *device_name, struct SerialCfgParam *pserial_cfg) -{ - int ret = EOK; - - agent->fd = open(device_name,O_RDWR); - - if (!pserial_cfg) { - struct SerialDataCfg data_cfg; - memset(&data_cfg, 0, sizeof(struct SerialDataCfg)); - data_cfg.serial_baud_rate = 57600; - ioctl(agent->fd, OPE_INT, &data_cfg); - - return EOK; - } - - ioctl(agent->fd, OPE_INT, &pserial_cfg->data_cfg); - - return EOK; -} - -int InitATAgent(const char *agent_name, const char *device_name, uint32 maintain_max, struct SerialCfgParam *pserial_cfg) -{ - int i = 0; - int result = EOK; - int open_result = EOK; - struct ATAgent *agent = NONE; - - if (GetATAgent(agent_name) != NULL) { - return result; - } - - while (i < AT_AGENT_MAX && at_agent_table[i].fd > 0) { - i++; - } - - if (i >= AT_AGENT_MAX) { - printf("agent buffer(%d) is full.", AT_AGENT_MAX); - result = -ERROR; - return result; - } - - agent = &at_agent_table[i]; - - SerialBusCheck(agent, device_name, 
pserial_cfg); - - strcpy(agent->agent_name, agent_name); - - agent->maintain_max = maintain_max; - - result = ATAgentInit(agent); - if (result == EOK) - { - UserTaskStartup(agent->at_handler); - } - - return result; -} - -ATReplyType CreateATReply(uint32 reply_max_len) -{ - ATReplyType reply = NULL; - - reply = (ATReplyType)malloc(sizeof(struct ATReply)); - if (reply == NULL){ - printf("no more memory\n"); - return NULL; - } - - reply->reply_max_len = reply_max_len; - - reply->reply_buffer = (char *)malloc(reply_max_len); - if (reply->reply_buffer == NULL){ - printf("no more memory\n"); - free(reply); - return NULL; - } - - return reply; -} - -void DeleteATReply(ATReplyType reply) -{ - if (reply){ - if (reply->reply_buffer){ - free(reply->reply_buffer); - reply->reply_buffer = NULL; - } - } - - if (reply){ - free(reply); - reply = NULL; - } -} - - - diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/src/xs_adapter_manager.c b/Ubiquitous/XiUOS/framework/connection/Adapter/src/xs_adapter_manager.c deleted file mode 100644 index d8f3eb1e..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/src/xs_adapter_manager.c +++ /dev/null @@ -1,169 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ - -/** - * @file xs_adapter_manager.c - * @brief manage adapter list - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#include "xs_adapter_manager.h" -#include -#include -#include -#include -#ifdef CONNECTION_COMMUNICATION_ZIGBEE -#include -#endif -#ifdef CONNECTION_COMMUNICATION_BLUETOOTH -#include -#endif -#ifdef CONNECTION_COMMUNICATION_LORA -#include -#endif - - -// Zigbee Adapter List -#ifdef CONNECTION_COMMUNICATION_ZIGBEE -static DoubleLinklistType zigbee_adapter_list; - -void ZigbeeAdapterInit() -{ - InitDoubleLinkList(&zigbee_adapter_list); -} - -void ZigbeeAdapterRegister(adapter_t padapter) -{ - DoubleLinkListInsertNodeAfter(&zigbee_adapter_list, &(padapter->link)); -} - -void* ZigbeeAdapterFind(char* name) -{ - DoubleLinklistType* pnode = NONE; - DoubleLinklistType* phead = &zigbee_adapter_list; - struct AdapterZigbee* padapter = NONE; - - for(pnode = phead->node_next; pnode != phead; pnode = pnode->node_next) { - padapter = (struct AdapterZigbee*)SYS_DOUBLE_LINKLIST_ENTRY(pnode, struct Adapter, link); - // KPrintf("ZigbeeReceiveDemo bbb\n"); - if (0 == strcmp(padapter->name, name)){ - return padapter; - } - } - - return padapter; -} -#endif - - -// Bluetooth Adapter List -#ifdef CONNECTION_COMMUNICATION_BLUETOOTH -static DoubleLinklistType bluetooth_adapter_list; - -void BluetoothAdapterInit() -{ - InitDoubleLinkList(&bluetooth_adapter_list); -} - -void BluetoothAdapterRegister(adapter_t padapter) -{ - DoubleLinkListInsertNodeAfter(&bluetooth_adapter_list, &(padapter->link)); -} - -void* BluetoothAdapterFind(char* name) -{ - DoubleLinklistType* pnode = NONE; - DoubleLinklistType* phead = &bluetooth_adapter_list; - struct AdapterBluetooth* padapter = NONE; - - for(pnode = phead->node_next; pnode != phead; pnode = pnode->node_next) { - padapter = (struct 
AdapterBluetooth*)SYS_DOUBLE_LINKLIST_ENTRY(pnode, struct Adapter, link); - // KPrintf("BluetoothReceiveDemo bbb\n"); - if (0 == strcmp(padapter->name, name)){ - return padapter; - } - } - - return padapter; -} -#endif - - - -// Lora Adapter List -#ifdef CONNECTION_COMMUNICATION_LORA -static DoubleLinklistType lora_adapter_list; - -void LoraAdapterInit() -{ - InitDoubleLinkList(&lora_adapter_list); -} - -void LoraAdapterRegister(adapter_t padapter) -{ - DoubleLinkListInsertNodeAfter(&lora_adapter_list, &(padapter->link)); -} - -void* LoraAdapterFind(char* name) -{ - DoubleLinklistType* pnode = NONE; - DoubleLinklistType* phead = &lora_adapter_list; - struct AdapterLora* padapter = NONE; - - for(pnode = phead->node_next; pnode != phead; pnode = pnode->node_next) { - padapter = (struct AdapterLora*)SYS_DOUBLE_LINKLIST_ENTRY(pnode, struct Adapter, link); - - if (0 == strcmp(padapter->name, name)) { - return padapter; - } - } - - return padapter; -} -#endif - -// AT Adapter List -static DoubleLinklistType at_adapter_list; -static int at_adapter_list_inited = 0; -void ATAdapterInit() -{ - if(!at_adapter_list_inited){ - InitDoubleLinkList(&at_adapter_list); - at_adapter_list_inited = 1; - } -} - -void ATAdapterRegister(struct AdapterAT* at_adapter) -{ - DoubleLinkListInsertNodeAfter(&at_adapter_list, &(at_adapter->link)); -} - -void* ATAdapterFind(uint32 adapter_id) -{ - DoubleLinklistType* pnode = NONE; - DoubleLinklistType* phead = &at_adapter_list; - struct AdapterAT* padapter = NONE; - - - for(pnode = phead->node_next; pnode != phead; pnode = pnode->node_next) { - padapter = (struct AdapterAT*)SYS_DOUBLE_LINKLIST_ENTRY(pnode, struct AdapterAT, link); - - if (padapter->at_adapter_id == adapter_id){ - return padapter; - } - } - - return NULL; -} - diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/wifi/Makefile b/Ubiquitous/XiUOS/framework/connection/Adapter/wifi/Makefile deleted file mode 100644 index 75fb1f78..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/wifi/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -SRC_FILES += xs_adapter_at_wifi.c \ - xs_adapter_at_wifi_register.c \ - -include $(KERNEL_ROOT)/compiler.mk \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/wifi/xs_adapter_at_wifi.c b/Ubiquitous/XiUOS/framework/connection/Adapter/wifi/xs_adapter_at_wifi.c deleted file mode 100644 index 5f8cd3e4..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/wifi/xs_adapter_at_wifi.c +++ /dev/null @@ -1,482 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. 
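The AT agent above converts between dotted-quad strings and 32-bit integers with IpTint() and IpTstr(); a dotted quad maps to octet1*2^24 + octet2*2^16 + octet3*2^8 + octet4, so "192.168.1.183" becomes 3232235959. A standalone sketch of the same round trip without pow() and strtok() (not a drop-in replacement for the framework helpers, just the arithmetic spelled out):

    /* Sketch: equivalent of IpTint()/IpTstr() from xs_adapter_at_agent.c above. */
    #include <stdio.h>

    static unsigned int IpToUint(const char *ipstr)
    {
        unsigned int a, b, c, d;
        if (sscanf(ipstr, "%u.%u.%u.%u", &a, &b, &c, &d) != 4)
            return 0;
        return (a << 24) | (b << 16) | (c << 8) | d;   /* big-endian octet order */
    }

    static void UintToIp(unsigned int ip, char *buf, int len)
    {
        snprintf(buf, len, "%u.%u.%u.%u",
                 (ip >> 24) & 0xFF, (ip >> 16) & 0xFF, (ip >> 8) & 0xFF, ip & 0xFF);
    }

    int main(void)
    {
        char buf[16];
        unsigned int ip = IpToUint("192.168.1.183");   /* 3232235959 */
        UintToIp(ip, buf, sizeof(buf));
        printf("%u -> %s\n", ip, buf);                 /* round trip back to the string */
        return 0;
    }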
-*/ - -/** - * @file xs_adapter_at_wifi.c - * @brief HFA21 wifi driver base connection framework - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#include -#include -#include -#include -#include -#include - -int WifiSetDown(struct AdapterAT *adapter_at); - -/** - * @description: Close wifi - * @param padapter - wifi device pointer - */ -void WifiClose(struct Adapter *padapter) -{ - WifiSetDown((struct AdapterAT *)padapter); -} - -/** - * @description: Open wifi - * @param padapter - wifi device pointer - * @return success: EOK, failure: -ERROR - */ -int WifiOpen(struct Adapter *padapter) -{ - uint32 result; - char *agent_name = "uart3_client"; - const char *device_name = WIFI_UART_NAME; - - if (InitATAgent(agent_name, device_name, 512, NULL)) { - printf("at_client_init failed ! \n"); - result = -ERROR; - return result; - } - - ATAgentType at_agent = GetATAgent(agent_name); - if (NULL == at_agent) { - printf("GetATAgent failed ! \n"); - return -ERROR; - } - UserTaskDelay(5000); - struct AdapterAT *wifi_at_adapter = (struct AdapterAT *)padapter; - wifi_at_adapter->agent = at_agent; -} - -int WifiSend(struct Adapter *padapter, const char *data, int len, bool block, int time_out, int delay, send_success cb, void *param, void* p) -{ - struct AdapterAT *wifi_at_adapter = (struct AdapterAT *)padapter; - - if (wifi_at_adapter->agent) { - EntmSend(wifi_at_adapter->agent, data, len); - } -} - -int WifiReceive(struct Adapter *padapter, char *rev_buffer, int buffer_len, int time_out, bool block, void* p) -{ - printf("wifi receive waiting ... \n"); - - struct AdapterAT *wifi_at_adapter = (struct AdapterAT *)padapter; - if (wifi_at_adapter->agent) { - return EntmRecv(wifi_at_adapter->agent, rev_buffer, buffer_len, 40000); - } else { - printf("Can not find agent \n"); - } -} - -uint32 WifiInitAtCmd(ATAgentType at_agent) -{ - uint32 result; - - ATReplyType reply = CreateATReply(64); - if (NULL == reply) { - printf("at_create_resp failed ! 
\n"); - result = -ERROR; - goto __exit; - } - - ATOrderSend(at_agent, REPLY_TIME_OUT, NULL, "+++"); - UserTaskDelay(100); - - ATOrderSend(at_agent, REPLY_TIME_OUT, NULL, "a"); - - UserTaskDelay(500); - - return result; - -__exit: - if (reply) { - DeleteATReply(reply); - } - - return result; -} - -static void WifiSetUpAdapter(void *parameter) -{ - #define INIT_RETRY 5 - #define LEN_PARA_BUF 128 - uint8 server_addr_wifi[LEN_PARA_BUF] = "192.168.1.183"; - uint8 server_port_wifi[LEN_PARA_BUF] = "12345"; - uint8 wifi_ssid[LEN_PARA_BUF] = "AIIT-Guest"; - uint8 wifi_pwd[LEN_PARA_BUF] = ""; - char cmd[LEN_PARA_BUF]; - - struct AdapterAT *adapter_at = (struct AdapterAT *) parameter; - - //struct at_device_esp8266 *esp8266 = (struct at_device_esp8266 *) device->UserData; - struct ATAgent *agent = adapter_at->agent; - ATReplyType reply = NONE; - x_err_t result = EOK; - x_size_t retry_num = INIT_RETRY; - - /* wait hfa21 device startup finish */ - UserTaskDelay(5000); - - reply = CreateATReply(64); - if (reply == NONE) { - printf("no memory for reply create."); - return; - } - - while (retry_num--) { - WifiInitAtCmd(agent); - - ATOrderSend(agent, REPLY_TIME_OUT, NULL, "AT+FCLR\r"); - UserTaskDelay(20000); - - WifiInitAtCmd(agent); - - memset(cmd,0,sizeof(cmd)); - strcpy(cmd,"AT+WANN\r"); - ATOrderSend(agent, REPLY_TIME_OUT, NULL, cmd); - UserTaskDelay(2500); - - memset(cmd,0,sizeof(cmd)); - strcpy(cmd,"AT+WSSSID="); - strcat(cmd,wifi_ssid); - strcat(cmd,"\r"); - ATOrderSend(agent, REPLY_TIME_OUT, NULL, cmd); - UserTaskDelay(2500); - - memset(cmd,0,sizeof(cmd)); - strcpy(cmd,"AT+WSKEY=WPA2PSK,AES,"); - strcat(cmd,wifi_pwd); - strcat(cmd,"\r"); - ATOrderSend(agent, REPLY_TIME_OUT, NULL, cmd); - UserTaskDelay(2500); - - memset(cmd,0,sizeof(cmd)); - strcpy(cmd,"AT+WMODE=sta\r"); - ATOrderSend(agent, REPLY_TIME_OUT, NULL, cmd); - UserTaskDelay(2500); - - memset(cmd,0,sizeof(cmd)); - strcat(cmd,"AT+Z\r"); - ATOrderSend(agent, REPLY_TIME_OUT, NULL, cmd); - UserTaskDelay(5000); - - /* initialize successfully */ - result = EOK; - break; - - __exit: - if (result != EOK) { - UserTaskDelay(1000); - } - } - - if (reply) { - DeleteATReply(reply); - } -} - -int WifiSetDown(struct AdapterAT *adapter_at) -{ - WifiInitAtCmd(adapter_at->agent); - - ATOrderSend(adapter_at->agent, REPLY_TIME_OUT, NULL, "AT+FCLR\r"); - UserTaskDelay(20000); -} - -int WifiSetUp(struct AdapterAT *adapter_at) -{ - WifiSetUpAdapter(adapter_at); - - return EOK; -} - -int WifiSetAddr(struct AdapterAT *adapter_at, uint ip, uint gateway, uint netmask) -{ - #define HFA21_SET_ADDR_EXPRESSION "+ok=%[^,],%[^,],%[^,],%[^,]\r" - char *dhcp_mode =NULL; - char *ip_str = NULL; - /* role type(server/agent) */ - char *gw_str = NULL; - char *mask_str = NULL; - - dhcp_mode = (char *) UserCalloc(1, 8); - ip_str = (char *) UserCalloc(1, 17); - gw_str = (char *) UserCalloc(1, 17); - mask_str = (char *) UserCalloc(1, 17); - - WifiInitAtCmd(adapter_at->agent); - - uint32 result = EOK; - - ATReplyType reply = CreateATReply(64); - if (NULL == reply) { - printf("at_create_resp failed ! \n"); - result = -ERROR; - goto __exit; - } - - uint32 err = ATOrderSend(adapter_at->agent, REPLY_TIME_OUT, NULL, "AT+WANN=%s,%s,%s,%s\r", "static", IpTstr(ip), IpTstr(netmask), IpTstr(gateway)); - if (err) { - printf("ATOrderSend(AT+PING)failed ! 
err = %d\n", err); - result = -ERROR; - goto __exit; - } - - UserTaskDelay(2500); - - ATOrderSend(adapter_at->agent, REPLY_TIME_OUT, reply, "AT+WANN\r"); - UserTaskDelay(2500); - ATOrderSend(adapter_at->agent, REPLY_TIME_OUT, NULL, "AT+Z\r"); - UserTaskDelay(5000); - - const char * result_buf = GetReplyText(reply); - if (!result_buf) { - printf("send_dhcp_at_cmd AT+ result_buf = NULL"); - result = -ERROR; - goto __exit; - } - - char* str = strstr(result_buf, "+ok="); - printf("str is:%s\n",str); - - ParseATReply(str, HFA21_SET_ADDR_EXPRESSION, dhcp_mode,ip_str,mask_str,gw_str); - printf("after configure:\n mode:%s\n ip:%s\n netmask:%s\n gateway:%s\n", dhcp_mode, ip_str, mask_str, gw_str); - -__exit: - if (reply) { - DeleteATReply(reply); - } - - return result; -} - -int WifiDHCP(struct AdapterAT *adapter_at, bool enable) -{ - int result = EOK; - ATReplyType reply = NONE; - char dhcp_status[4]; - memset(dhcp_status,0,sizeof(dhcp_status)); - if(enable) - strcpy(dhcp_status,"on"); - else - strcpy(dhcp_status,"off"); - - reply = CreateATReply(64); - if (reply == NONE) { - printf("no memory for reply struct."); - return -ENOMEMORY; - } - - WifiInitAtCmd(adapter_at->agent); - - char cmd[LEN_PARA_BUF]; - memset(cmd, 0, sizeof(cmd)); - strcpy(cmd, "AT+DHCPDEN="); - strcat(cmd, dhcp_status); - strcat(cmd, "\r"); - ATOrderSend(adapter_at->agent, REPLY_TIME_OUT, reply, cmd); - UserTaskDelay(2500); - - ATOrderSend(adapter_at->agent, REPLY_TIME_OUT, NULL, "AT+Z\r"); - UserTaskDelay(2500); - -__exit: - if (reply) { - DeleteATReply(reply); - } - - return result; -} - -int WifiPing(struct AdapterAT *adapter_at, const char *destination,struct PingResult *ping_resp) -{ - char *ping_result = NONE; - char *dst = NONE; - ping_result = (char *) UserCalloc(1, 17); - dst = (char *) UserCalloc(1, 17); - strcpy(dst, destination); - strcat(dst, "\r"); - - WifiInitAtCmd(adapter_at->agent); - - uint32 result = EOK; - - ATReplyType reply = CreateATReply(64); - if (NULL == reply) { - printf("at_create_resp failed ! \n"); - result = -ERROR; - goto __exit; - } - - printf("\\r = 0x%x, \\n = 0x%x\n", '\r', '\n'); - - //ping baidu.com - uint32 err = ATOrderSend(adapter_at->agent, REPLY_TIME_OUT, reply, "AT+PING=%s", dst); - if (err) { - printf("ATOrderSend(AT+PING)failed ! 
err = %d\n", err); - result = -ERROR; - goto __exit; - } - - UserTaskDelay(2500); - - ATOrderSend(adapter_at->agent, REPLY_TIME_OUT, NULL, "AT+Z\r"); - UserTaskDelay(2000); - - const char * result_buf = GetReplyText(reply); - if (!result_buf) { - printf("send_dhcp_at_cmd AT+ result_buf = NULL"); - result = -ERROR; - goto __exit; - } - - char* str = strstr(result_buf, "+ok="); - printf("str is:%s\n",str); - - ParseATReply(str, "+ok=%s\r", ping_result); - - printf("ping www.baidu.com(36.152.44.95) result is:%s\n", ping_result); - -__exit: - if (reply) { - DeleteATReply(reply); - } - - return result; -} - -int WifiNetstat(struct AdapterAT *adapter_at) -{ - #define HFA21_NETSTAT_RESP_SIZE 320 - #define HFA21_NETSTAT_TYPE_SIZE 10 - #define HFA21_NETSTAT_IPADDR_SIZE 17 - #define HFA21_WANN_EXPRESSION "+ok=%[^,],%[^,],%[^,],%[^,]\r" - #define HFA21_LANN_EXPRESSION "+ok=%[^,],%[^,]\r" - #define HFA21_WMODE_EXPRESSION "+ok=%s\r" - - ATReplyType reply = NULL; - struct ATAgent *agent = adapter_at->agent; - uint32 result; - char * result_buf = NULL; - char * str = NULL; - - /* sta/ap */ - char *work_mode = NULL; - /* dhcp/static */ - char *ip_mode = NULL; - char *local_ipaddr = NULL; - char *gateway = NULL; - char *netmask = NULL; - local_ipaddr = (char *) UserCalloc(1, HFA21_NETSTAT_IPADDR_SIZE); - gateway = (char *) UserCalloc(1, HFA21_NETSTAT_IPADDR_SIZE); - netmask = (char *) UserCalloc(1, HFA21_NETSTAT_IPADDR_SIZE); - work_mode = (char *) UserCalloc(1, HFA21_NETSTAT_IPADDR_SIZE); - ip_mode = (char *) UserCalloc(1, HFA21_NETSTAT_IPADDR_SIZE); - - reply = CreateATReply(HFA21_NETSTAT_RESP_SIZE); - if (reply == NULL) { - //printf("no memory for reply create.", device->name); - goto __exit; - } - - ATOrderSend(agent, REPLY_TIME_OUT, NULL, "+++"); - UserTaskDelay(100); - - ATOrderSend(agent, REPLY_TIME_OUT, NULL, "a"); - UserTaskDelay(2500); - - if (ATOrderSend(agent, REPLY_TIME_OUT, reply, "AT+WMODE\r") < 0) { - goto __exit; - } - - UserTaskDelay(2500); - - result_buf = GetReplyText(reply); - if(!result_buf) { - printf("send_dhcp_at_cmd AT+ result_buf = NULL"); - result = -ERROR; - goto __exit; - } - - str = strstr(result_buf, "+ok="); - printf("str is:%s\n",str); - /* parse the third line of response data, get the network connection information */ - ParseATReply(str, HFA21_WMODE_EXPRESSION, work_mode); - - if (work_mode[0]=='S') { - if (ATOrderSend(agent, REPLY_TIME_OUT, reply, "AT+WANN\r") < 0) { - goto __exit; - } - - UserTaskDelay(2500); - - result_buf = GetReplyText(reply); - if (!result_buf) { - printf("send_dhcp_at_cmd AT+ result_buf = NULL"); - result = -ERROR; - goto __exit; - } - - str = strstr(result_buf, "+ok="); - printf("str is:%s\n",str); - /* parse the third line of response data, get the network connection information */ - ParseATReply(str, HFA21_WANN_EXPRESSION, ip_mode, local_ipaddr, netmask, gateway); - } else { - if (ATOrderSend(agent, REPLY_TIME_OUT, reply, "AT+LANN\r") < 0) { - goto __exit; - } - - UserTaskDelay(2500); - - result_buf = GetReplyText(reply); - if (!result_buf) { - printf("send_dhcp_at_cmd AT+ result_buf = NULL"); - result = -ERROR; - goto __exit; - } - - str = strstr(result_buf, "+ok="); - printf("str is:%s\n",str); - /* parse the third line of response data, get the network connection information */ - ParseATReply(str, HFA21_LANN_EXPRESSION, local_ipaddr, netmask); - } - - ATOrderSend(adapter_at->agent, REPLY_TIME_OUT, NULL, "AT+Z\r"); - UserTaskDelay(2500); - - printf("work mode: %s\n", work_mode); - if (work_mode[0]=='S') - printf("ip mode: %s\nlocal ip: 
%s\nnetmask: %s\ngateway: %s\n", ip_mode, local_ipaddr, netmask, gateway); - else - printf("local ip: %s\nnetmask: %s\n", local_ipaddr, netmask); - - return EOK; - -__exit: - if (reply) - DeleteATReply(reply); - if (local_ipaddr) - UserFree(local_ipaddr); - if (netmask) - UserFree(netmask); - if (gateway) - UserFree(gateway); - if (work_mode) - UserFree(work_mode); -} diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/wifi/xs_adapter_at_wifi_register.c b/Ubiquitous/XiUOS/framework/connection/Adapter/wifi/xs_adapter_at_wifi_register.c deleted file mode 100644 index 6735c6da..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/wifi/xs_adapter_at_wifi_register.c +++ /dev/null @@ -1,73 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ - -/** - * @file xs_adapter_at_wifi_register.c - * @brief HFA21 wifi driver register to connection framework - * @version 1.0 - * @author AIIT XUOS Lab - * @date 2021.04.22 - */ - -#include -#include -#include -#include -#include - -const struct AdapterDone wifi_adapter_done = -{ - WifiClose, - WifiOpen, - NULL, - WifiSend, - WifiReceive, - NULL, -}; - -const struct ATDone wifi_at_done = -{ - WifiSetUp, - WifiSetDown, - WifiSetAddr, - NULL, - WifiDHCP, - WifiPing, - WifiNetstat, - NULL, -}; - -/** - * @description: Register wifi device - * @return success: EOK, failure: ERROR - */ -int RegisterAdapterWifi(void) -{ - struct Adapterwifi *wifi_adapter = malloc(sizeof(struct Adapterwifi)); - if (wifi_adapter == NULL) { - printf("out of memory\n"); - return ERROR; - } - - struct AdapterAT *wifi_at_adapter = (struct AdapterAT *)wifi_adapter; - struct Adapter *adapter = (struct Adapter *)wifi_adapter; - - wifi_adapter->parent.atdone = wifi_at_done; - wifi_adapter->parent.parent.done = wifi_adapter_done; - - wifi_at_adapter->at_adapter_id = WIFI_ADAPTER_ID; - - ATAdapterInit(); - ATAdapterRegister(wifi_at_adapter); - - return EOK; -} \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/zigbee/Kconfig b/Ubiquitous/XiUOS/framework/connection/Adapter/zigbee/Kconfig deleted file mode 100644 index 5e59af4b..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/zigbee/Kconfig +++ /dev/null @@ -1,11 +0,0 @@ -menuconfig CONNECTION_COMMUNICATION_ZIGBEE_AIIT -bool "Enable zigbee for AIIT Board" -default n - -menuconfig CONNECTION_COMMUNICATION_ZIGBEE_KD233 -bool "Enable zigbee for kd233" -default n - -menuconfig CONNECTION_COMMUNICATION_ZIGBEE_STM32 -bool "Enable zigbee for STM32F407-DISCOVERY" -default n \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/zigbee/Makefile b/Ubiquitous/XiUOS/framework/connection/Adapter/zigbee/Makefile deleted file mode 100644 index 86e2b023..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/zigbee/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -SRC_FILES := xs_adapter_zigbee.c xs_adaper_zigbee_register.c - -include $(KERNEL_ROOT)/compiler.mk \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/zigbee/xs_adaper_zigbee_register.c 
b/Ubiquitous/XiUOS/framework/connection/Adapter/zigbee/xs_adaper_zigbee_register.c deleted file mode 100644 index cdb8bf9c..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/zigbee/xs_adaper_zigbee_register.c +++ /dev/null @@ -1,45 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ - -/** -* @file: xs_AdaperZigbee_register.c -* @brief: register zigbee in initialization -* @version: 1.0 -* @author: AIIT XUOS Lab -* @date: 2021/4/30 -* -*/ -#include -#include -#include -/* initialize to the register list*/ -int RegisterAdapterZigbee(void) -{ - static struct AdapterZigbee zigbee_adapter; - memset(&zigbee_adapter, 0, sizeof(zigbee_adapter)); - - static struct AdapterDone zigbee_send_done = { - .NetAiitOpen = ZigbeeOpen, - .NetAiitClose = ZigbeeClose, - .NetAiitSend = ZigbeeSend, - .NetAiitReceive = ZigbeeReceive, - .NetAiitJoin = NULL, - .NetAiitIoctl = NULL, - }; - zigbee_adapter.parent.done = zigbee_send_done; - zigbee_adapter.name = "zigbee"; - - ZigbeeAdapterInit(); - ZigbeeAdapterRegister((adapter_t)&zigbee_adapter); - - return EOK; -} \ No newline at end of file diff --git a/Ubiquitous/XiUOS/framework/connection/Adapter/zigbee/xs_adapter_zigbee.c b/Ubiquitous/XiUOS/framework/connection/Adapter/zigbee/xs_adapter_zigbee.c deleted file mode 100644 index 81a011da..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Adapter/zigbee/xs_adapter_zigbee.c +++ /dev/null @@ -1,190 +0,0 @@ -/* -* Copyright (c) 2020 AIIT XUOS Lab -* XiUOS is licensed under Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* http://license.coscl.org.cn/MulanPSL2 -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. 
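[Editor's note] The HFA21 adapter deleted above recovers fields from module replies such as the AT+WANN answer with scanf-style expressions like "+ok=%[^,],%[^,],%[^,],%[^,]\r" passed to ParseATReply. A minimal standalone sketch of that parsing step, using plain sscanf and an invented sample reply string (the real driver obtains the text through GetReplyText and the AT agent rather than a constant):

#include <stdio.h>

/* Hypothetical reply; a real HFA21 answers AT+WANN with this general shape. */
static const char sample_reply[] = "+ok=static,192.168.1.183,255.255.255.0,192.168.1.1\r";

int main(void)
{
    char mode[8] = {0};
    char ip[17] = {0};
    char mask[17] = {0};
    char gw[17] = {0};

    /* "%[^,]" consumes characters up to the next comma, mirroring the
       HFA21_WANN_EXPRESSION pattern used by the deleted driver. */
    if (sscanf(sample_reply, "+ok=%7[^,],%16[^,],%16[^,],%16[^\r]",
               mode, ip, mask, gw) == 4) {
        printf("mode: %s\nip: %s\nnetmask: %s\ngateway: %s\n",
               mode, ip, mask, gw);
    }
    return 0;
}

The fixed-size buffers and explicit field widths keep the sketch safe even if the module returns an unexpectedly long token; the driver above achieves the same effect with its 17-byte UserCalloc allocations.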
-*/ - -/** -* @file: xs_AdapterZigbee.c -* @brief: zigbee open close function -* @version: 1.0 -* @author: AIIT XUOS Lab -* @date: 2021/4/30 -* -*/ -#include -#include "xs_adapter_zigbee.h" -#include -#include -#include -#include -#ifdef CONNECTION_COMMUNICATION_ZIGBEE_AIIT -#define SAMPLE_UART_NAME "/dev/extuart_dev0" -int use_aiit = 1; -#endif -#ifdef CONNECTION_COMMUNICATION_ZIGBEE_KD233 -#define SAMPLE_UART_NAME "/dev/uart3_dev3" -int use_aiit = 0; -#endif -#ifdef CONNECTION_COMMUNICATION_ZIGBEE_STM32 -#define SAMPLE_UART_NAME "/dev/usart3_dev3" -int use_aiit = 0; -#endif - -static int serial_fd; -static int32_t zigbeereceive; -static int rx_sem; -char zigbee_buffer[NAME_NUM_MAX ]={0}; - -/* initialize srial port to open zigbee*/ -int ZigbeeOpen(struct Adapter *padapter) -{ - - /* Open device in read-write mode */ - serial_fd = open(SAMPLE_UART_NAME,O_RDWR); - - /* set serial config, serial_baud_rate = 115200 */ - - struct SerialDataCfg cfg; - cfg.serial_baud_rate = BAUD_RATE_115200; - cfg.serial_data_bits = DATA_BITS_8; - cfg.serial_stop_bits = STOP_BITS_1; - cfg.serial_parity_mode = PARITY_NONE; - cfg.serial_bit_order = 0; - cfg.serial_invert_mode = 0; - cfg.serial_buffer_size = 128; - - /*aiit board use ch438, so it nees more serial configuration*/ - if (use_aiit==1){ - cfg.ext_uart_no = 0; - cfg.port_configure = 0; - } - - ioctl(serial_fd, 0, &cfg); - UserTaskDelay(1000); - printf("Zigbee ready\n"); - - return 0; -} - -/* send message to srial port*/ -int ZigbeeSend(struct Adapter *padapter, const char* data, int len, bool block, int time_out, int delay, send_success cb, void* param, void* reserved) -{ - write(serial_fd,data,strlen(data)); - - - return 0; -} - - -/*thread to read message from srial port*/ -void SerialThreadEntry(void *parameter) -{ - char ch; - int i = 0; - int n; - int run = 0; - while (1){ - n = read(serial_fd,&ch,1); - if (n>0){ - if (ch == '~'){ - UserSemaphoreAbandon(rx_sem); - run = 1; - break; - } - zigbee_buffer[i++] = ch; - } - if (run ==1) - break; - } -} - -int ZigbeeReceive(struct Adapter *padapter, char* rev_buffer, int buffer_len,int time_out, bool block, void* reserved) -{ - - x_err_t ret = EOK; - /* Set callback function */ - /* Create thread serial */ - UtaskType recv; - recv.name[0] = 'z'; - recv.func_entry = SerialThreadEntry; - recv.func_param = NONE; - recv.stack_size = 1024; - recv.prio = 25; - memset(zigbee_buffer, 0, sizeof(zigbee_buffer)); - - /* Initialize semaphore */ - rx_sem = UserSemaphoreCreate(0); - - zigbeereceive = UserTaskCreate(recv); - UserTaskStartup(zigbeereceive); - - /*copy to the receive buffer*/ - UserSemaphoreObtain(rx_sem,-1); - memcpy(rev_buffer,zigbee_buffer,strlen(zigbee_buffer)+1 ); - - return ret; - -} - -void ZigbeeSettingDemo(int argc, char *argv[]) -{ - - adapter_t padapter = ZigbeeAdapterFind("zigbee"); - if (NONE == padapter){ - KPrintf("adapter find failed!\n"); - return; - } - /*Open adapter*/ - if (0 != padapter->done.NetAiitOpen(padapter)){ - KPrintf("adapter open failed!\n"); - return; - } - - ZigbeeOpen(padapter); - /*zigbee communication settings*/ - /*it can be changed if needed*/ - char *set0 = "+++"; - char *set1_1 = "AT+DEV=C"; /*set device type for coordinater*/ - char *set1_2 = "AT+DEV=E"; /*set device type for end device*/ - char *set2 = "AT+MODE=1"; /*device mode 1 : passthrough */ - char *set3 = "AT+PANID=A1B2"; /* set PANID*/ - char *set4 = "AT+CH=11"; /* set channel*/ - char *set5 = "AT+EXIT"; /* exit AT mode*/ - write(serial_fd,set0,strlen(set0)); - UserTaskDelay(1000); - /*type something in 
the command line to set this zigbee as coordinater*/ - /*otherwise it is an end device*/ - if (argc == 2){ - write(serial_fd,set1_1,strlen(set1_1)); - UserTaskDelay(1000); /*zigbee needs some time to process input message*/ - }else{ - write(serial_fd,set1_2,strlen(set1_2)); - UserTaskDelay(1000); - } - write(serial_fd,set2,strlen(set2)); - UserTaskDelay(1000); - write(serial_fd,set3,strlen(set3)); - UserTaskDelay(1000); - write(serial_fd,set4,strlen(set4)); - UserTaskDelay(1000); - write(serial_fd,set5,strlen(set5)); - UserTaskDelay(1000); - printf("zigbee setting success!\n"); -} -#ifndef SEPARATE_COMPILE -SHELL_EXPORT_CMD(SHELL_CMD_PERMISSION(0)|SHELL_CMD_TYPE(SHELL_TYPE_CMD_MAIN), -ZigbeeSettingDemo, ZigbeeSettingDemo, zigbee send function ); -#endif - -void ZigbeeClose(struct Adapter *padapter) -{ - UserTaskDelete(zigbeereceive); - close(serial_fd); -} diff --git a/Ubiquitous/XiUOS/framework/connection/Kconfig b/Ubiquitous/XiUOS/framework/connection/Kconfig deleted file mode 100644 index 64e00349..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Kconfig +++ /dev/null @@ -1,11 +0,0 @@ -menu "connection" - -menuconfig CONNECTION_ADAPTER -bool "Enable adapter" -default n -if CONNECTION_ADAPTER -source "$KERNEL_DIR/framework/connection/Adapter/Kconfig" -endif - - -endmenu diff --git a/Ubiquitous/XiUOS/framework/connection/Makefile b/Ubiquitous/XiUOS/framework/connection/Makefile deleted file mode 100644 index a7d7ce08..00000000 --- a/Ubiquitous/XiUOS/framework/connection/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -ifeq ($(CONFIG_CONNECTION_ADAPTER), y) -SRC_DIR += Adapter -endif - -include $(KERNEL_ROOT)/compiler.mk diff --git a/Ubiquitous/XiUOS/framework/control/Kconfig b/Ubiquitous/XiUOS/framework/control/Kconfig deleted file mode 100644 index b33ab92b..00000000 --- a/Ubiquitous/XiUOS/framework/control/Kconfig +++ /dev/null @@ -1,2 +0,0 @@ -menu "Control" -endmenu diff --git a/Ubiquitous/XiUOS/framework/intelligent/Kconfig b/Ubiquitous/XiUOS/framework/intelligent/Kconfig deleted file mode 100644 index 99728b10..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/Kconfig +++ /dev/null @@ -1,5 +0,0 @@ -menu "Intelligence" - -source "$KERNEL_DIR/framework/intelligent/tflite-mcu/Kconfig" - -endmenu diff --git a/Ubiquitous/XiUOS/framework/intelligent/Makefile b/Ubiquitous/XiUOS/framework/intelligent/Makefile deleted file mode 100644 index 80a67299..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -SRC_DIR := tflite-mcu - -include $(KERNEL_ROOT)/compiler.mk diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/Kconfig b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/Kconfig deleted file mode 100644 index 823c47fb..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/Kconfig +++ /dev/null @@ -1,4 +0,0 @@ -config INTELLIGENT_TFLITE - bool "Tensorflow Lite for Micro" - select LIB_CPLUSPLUS - default n diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/Makefile b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/Makefile deleted file mode 100644 index af5b761a..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/Makefile +++ /dev/null @@ -1,89 +0,0 @@ -TFLITE_SRCS = \ - source/tensorflow/lite/micro/all_ops_resolver.cc \ - source/tensorflow/lite/micro/debug_log.cc \ - source/tensorflow/lite/micro/memory_helpers.cc \ - source/tensorflow/lite/micro/micro_allocator.cc \ - source/tensorflow/lite/micro/micro_error_reporter.cc \ - source/tensorflow/lite/micro/micro_interpreter.cc \ - 
source/tensorflow/lite/micro/micro_profiler.cc \ - source/tensorflow/lite/micro/micro_string.cc \ - source/tensorflow/lite/micro/micro_time.cc \ - source/tensorflow/lite/micro/micro_utils.cc \ - source/tensorflow/lite/micro/recording_micro_allocator.cc \ - source/tensorflow/lite/micro/recording_simple_memory_allocator.cc \ - source/tensorflow/lite/micro/simple_memory_allocator.cc \ - source/tensorflow/lite/micro/test_helpers.cc \ - source/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cc \ - source/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc \ - source/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc \ - source/tensorflow/lite/micro/testing/test_conv_model.cc \ - source/tensorflow/lite/c/common.c \ - source/tensorflow/lite/core/api/error_reporter.cc \ - source/tensorflow/lite/core/api/flatbuffer_conversions.cc \ - source/tensorflow/lite/core/api/op_resolver.cc \ - source/tensorflow/lite/core/api/tensor_utils.cc \ - source/tensorflow/lite/kernels/internal/quantization_util.cc \ - source/tensorflow/lite/kernels/kernel_util.cc \ - source/tensorflow/lite/schema/schema_utils.cc \ - source/tensorflow/lite/micro/kernels/activations.cc \ - source/tensorflow/lite/micro/kernels/arg_min_max.cc \ - source/tensorflow/lite/micro/kernels/ceil.cc \ - source/tensorflow/lite/micro/kernels/circular_buffer.cc \ - source/tensorflow/lite/micro/kernels/comparisons.cc \ - source/tensorflow/lite/micro/kernels/concatenation.cc \ - source/tensorflow/lite/micro/kernels/conv_test_common.cc \ - source/tensorflow/lite/micro/kernels/dequantize.cc \ - source/tensorflow/lite/micro/kernels/detection_postprocess.cc \ - source/tensorflow/lite/micro/kernels/elementwise.cc \ - source/tensorflow/lite/micro/kernels/ethosu.cc \ - source/tensorflow/lite/micro/kernels/flexbuffers_generated_data.cc \ - source/tensorflow/lite/micro/kernels/floor.cc \ - source/tensorflow/lite/micro/kernels/hard_swish.cc \ - source/tensorflow/lite/micro/kernels/kernel_runner.cc \ - source/tensorflow/lite/micro/kernels/kernel_util.cc \ - source/tensorflow/lite/micro/kernels/l2norm.cc \ - source/tensorflow/lite/micro/kernels/logical.cc \ - source/tensorflow/lite/micro/kernels/logistic.cc \ - source/tensorflow/lite/micro/kernels/maximum_minimum.cc \ - source/tensorflow/lite/micro/kernels/neg.cc \ - source/tensorflow/lite/micro/kernels/pack.cc \ - source/tensorflow/lite/micro/kernels/pad.cc \ - source/tensorflow/lite/micro/kernels/prelu.cc \ - source/tensorflow/lite/micro/kernels/quantize.cc \ - source/tensorflow/lite/micro/kernels/quantize_common.cc \ - source/tensorflow/lite/micro/kernels/reduce.cc \ - source/tensorflow/lite/micro/kernels/reshape.cc \ - source/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc \ - source/tensorflow/lite/micro/kernels/round.cc \ - source/tensorflow/lite/micro/kernels/shape.cc \ - source/tensorflow/lite/micro/kernels/split.cc \ - source/tensorflow/lite/micro/kernels/split_v.cc \ - source/tensorflow/lite/micro/kernels/strided_slice.cc \ - source/tensorflow/lite/micro/kernels/sub.cc \ - source/tensorflow/lite/micro/kernels/svdf_common.cc \ - source/tensorflow/lite/micro/kernels/tanh.cc \ - source/tensorflow/lite/micro/kernels/transpose_conv.cc \ - source/tensorflow/lite/micro/kernels/unpack.cc \ - source/tensorflow/lite/micro/kernels/add.cc \ - source/tensorflow/lite/micro/kernels/conv.cc \ - source/tensorflow/lite/micro/kernels/depthwise_conv.cc \ - source/tensorflow/lite/micro/kernels/fully_connected.cc \ - source/tensorflow/lite/micro/kernels/mul.cc \ - 
source/tensorflow/lite/micro/kernels/pooling.cc \ - source/tensorflow/lite/micro/kernels/softmax.cc \ - source/tensorflow/lite/micro/kernels/svdf.cc - -ifeq ($(CONFIG_INTELLIGENT_TFLITE),y) - SRC_FILES := $(TFLITE_SRCS) tf_fini_fix.c - #CPPPATHS += \ - -Isource \ - -Isource/third_party/gemmlowp \ - -Isource/third_party/flatbuffers/include \ - -Isource/third_party/ruy - DEFINES += -DTF_LITE_USE_GLOBAL_CMATH_FUNCTIONS \ - -DTF_LITE_USE_GLOBAL_MAX \ - -DTF_LITE_USE_GLOBAL_MIN \ - -DTF_LITE_STATIC_MEMORY -endif - -include $(KERNEL_ROOT)/compiler.mk diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/LICENSE b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/LICENSE deleted file mode 100644 index 40f8c347..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ -Copyright 2019 The TensorFlow Authors. All rights reserved. - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/core/public/version.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/core/public/version.h deleted file mode 100644 index dd909d86..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/core/public/version.h +++ /dev/null @@ -1,139 +0,0 @@ -/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_CORE_PUBLIC_VERSION_H_ -#define TENSORFLOW_CORE_PUBLIC_VERSION_H_ - -// TensorFlow uses semantic versioning, see http://semver.org/. - -// Also update tensorflow/tensorflow.bzl and -// tensorflow/tools/pip_package/setup.py -#define TF_MAJOR_VERSION 2 -#define TF_MINOR_VERSION 5 -#define TF_PATCH_VERSION 0 - -// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1", -// "-beta", "-rc", "-rc.1") -#define TF_VERSION_SUFFIX "" - -#define TF_STR_HELPER(x) #x -#define TF_STR(x) TF_STR_HELPER(x) - -// e.g. "0.5.0" or "0.6.0-alpha". -#define TF_VERSION_STRING \ - (TF_STR(TF_MAJOR_VERSION) "." TF_STR(TF_MINOR_VERSION) "." TF_STR( \ - TF_PATCH_VERSION) TF_VERSION_SUFFIX) - -// GraphDef compatibility versions (the versions field in graph.proto). -// -// Each graph has producer and min_consumer versions, and each -// consumer has its own version and a min_producer. In addition, graphs can -// mark specific consumer versions as bad (to prevent bugs from executing). -// A consumer will execute a graph if the consumer's version is at least the -// graph's min_consumer, the graph's producer version is at least the consumer's -// min_producer, and the consumer version isn't specifically disallowed by the -// graph. 
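[Editor's note] The comment above states the rule exactly: a consumer may execute a graph when the consumer's version is at least the graph's min_consumer, the graph's producer version is at least the consumer's min_producer, and the consumer version is not explicitly disallowed by the graph. A small illustrative check under those three conditions; the struct and function names here are invented for the sketch and are not the header's API:

#include <stdbool.h>
#include <stdio.h>

/* Invented container for the version fields described in the comment above. */
struct GraphDefVersions {
    int producer;
    int min_consumer;
    const int *bad_consumers;
    int num_bad_consumers;
};

/* True when a consumer at (consumer_version, min_producer) may run the graph. */
static bool GraphDefVersionsCompatible(const struct GraphDefVersions *g,
                                       int consumer_version, int min_producer)
{
    if (consumer_version < g->min_consumer)
        return false;                       /* consumer too old for this graph */
    if (g->producer < min_producer)
        return false;                       /* graph produced by too-old a producer */
    for (int i = 0; i < g->num_bad_consumers; i++) {
        if (g->bad_consumers[i] == consumer_version)
            return false;                   /* version explicitly disallowed */
    }
    return true;
}

int main(void)
{
    const int bad[] = { 17 };
    struct GraphDefVersions g = { 27, 0, bad, 1 };

    printf("consumer 26: %d\n", GraphDefVersionsCompatible(&g, 26, 0)); /* 1 */
    printf("consumer 17: %d\n", GraphDefVersionsCompatible(&g, 17, 0)); /* 0, in bad list */
    return 0;
}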
-// -// By default, newly created graphs have producer version TF_GRAPH_DEF_VERSION -// min_consumer TF_GRAPH_DEF_MIN_CONSUMER, and no other bad consumer versions. -// -// Version history: -// -// 0. Graphs created before GraphDef versioning -// 1. First real version (2dec2015) -// 2. adjust_contrast only takes float, doesn't perform clamping (11dec2015) -// 3. Remove TileGrad, since it was equivalent to reduce_sum (30dec2015) -// 4. When support for this version is removed, we can safely make AttrValue -// parsing more strict with respect to empty list values (see -// 111635679, 7jan2016). -// 5. Graphs are wholly-validated during Session::Create() (7jan2016). -// 6. TensorFlow is scalar strict within Google (27jan2016). -// 7. Remove TopK in favor of TopKV2 (5feb2016). -// 8. Replace RandomCrop from C++ with pure Python (5feb2016). -// 9. Deprecate batch_norm_with_global_normalization (16feb2016). -// 10. Deprecate conv3d_backprop_{filter,input} (10jun2016). -// 11. Deprecate {batch}_self_adjoint_eig (3aug2016). -// 12. Graph consumers understand the node_def field of FunctionDef (22aug2016). -// 13. Deprecate multiple batch linear algebra ops (9sep2016). -// 14. Deprecate batch_matrix_* ops. (10sep2016). -// 15. Deprecate batch_fft_* ops. (14sep2016). -// 16. Deprecate tensor_array (v1) ops in favor of v2 (10nov2016). -// 17. Deprecate inv (11nov2016). -// 17. Expose reverse_v2 (10nov2016) -// 18. Add VariableV2 (30nov2016) -// 19. Deprecated ops created by models moved out of core SkipGram, NegTrain. -// (08dec2016) -// 20. Catch all version 1.0 changes to Python API generation. SplitV is now -// used for tf.split, ReverseV2 is now used by tf.reverse, ConcatV2 is -// now used by tf.concat. Graphs use flooring -// division and mod semantics. TensorArrayV3. (12dec2016) -// Also considered the version for when it is required for reduction -// ops' indices to be scalar or vector, and not higher rank. -// Some earlier graph def versions allowed this. -// 21. Dropped FunctionDef.Node support, switched to node_def introduced -// in version 12. (11jan2017) -// 22. Placeholder now can specify and enforce scalar and partial -// shapes, particularly when restoring a graph from GraphDef -// produced at version 22 or later. (04/10/2016) -// 23. Remove NonMaxSuppression in favor of NonMaxSuppressionV2. -// 24. Deprecate lookup ops (v1) ops in favor of v2 (30may2017) -// 25. Deprecate stack (v1) ops in favor of v2 (2017/6/15). -// 25. Deprecate RandomPoisson (v1) ops in favor of v2 (2017/10/25). -// 26. Add a bool 'stripped_default_attrs' to MetaInfoDef indicating -// whether default-valued attrs have been stripped from the nodes in the -// GraphDef. (7dec2017) -// 27. Deprecate TensorArray ops v2 in favor of v3 and deprecated io_ops -// deprecated in favor of V2 ops. (2018/01/23) -// 28. Deprecate MatrixExponential op in favor of Python implementation. -// (2018/08/21). -// (2019/02/15). Added `control_ret` field to FunctionDef proto, and -// `control_output` field to OpDef proto. -// 29. Deprecate StatefulStandardNormal op in favor of StatefulStandardNormalV2. -// (2019/03/25). -// (2019/04/17). Added `arg_attr` field to FunctionDefProto. -// 30. (2019/05/09) First date based GraphDef version. GraphDef -// versions advance by 1 each day after this point. - -#define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0 -#define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0 -#define TF_GRAPH_DEF_VERSION 639 // Updated: 2021/1/7 - -// Checkpoint compatibility versions (the versions field in SavedSliceMeta). 
-// -// The checkpoint versions have the same semantics as GraphDef versions, but the -// numbering scheme is separate. We have no plans to ever deprecate checkpoint -// versions, but it's good to have this in place in case we ever need to. -// -// Version history: -// -// 0. Checkpoints saved before checkpoint versioning. -// 1. First real version (10feb2015). -#define TF_CHECKPOINT_VERSION_MIN_PRODUCER 0 -#define TF_CHECKPOINT_VERSION_MIN_CONSUMER 0 -#define TF_CHECKPOINT_VERSION 1 - -/// Version query functions (defined in generated version_info.cc) - -// Host compiler version (declared elsewhere to be __VERSION__) -extern const char* tf_compiler_version(); -// The git commit designator when tensorflow was built -// If no git repository, this will be "internal". -extern const char* tf_git_version(); -// Value of the _GLIBCXX_USE_CXX11_ABI flag, or 0 if it's not set. -extern int tf_cxx11_abi_flag(); -// Returns 1 if build is monolithic, or 0 otherwise. -extern int tf_monolithic_build(); - -#endif // TENSORFLOW_CORE_PUBLIC_VERSION_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/c/builtin_op_data.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/c/builtin_op_data.h deleted file mode 100644 index 31d792ad..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/c/builtin_op_data.h +++ /dev/null @@ -1,484 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_ -#define TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_ - -#include - -#include "tensorflow/lite/c/common.h" - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -// TfLiteReshapeParams can't have dynamic data so we fix the maximum possible -// number of dimensions. -#define TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT 8 - -// TODO(aselle): Consider using "if this then that" for testing. - -// Useful placeholder to put in otherwise empty structs to avoid size warnings. -typedef struct { - char dummy; -} EmptyStructPlaceholder; - -// IMPORTANT: All new members of structs must be added at the end to ensure -// backwards compatibility. - -// Possible padding types (for convolutions) -typedef enum { - kTfLitePaddingUnknown = 0, - kTfLitePaddingSame, - kTfLitePaddingValid, -} TfLitePadding; - -typedef enum { - kTfLiteMirrorPaddingUnknown = 0, - kTfLiteMirrorPaddingReflect, - kTfLiteMirrorPaddingSymmetric, -} TfLiteMirrorPaddingMode; - -// TODO(b/130259536): We should move this out of builtin_op_data. -typedef struct { - int width; - int height; - int width_offset; - int height_offset; -} TfLitePaddingValues; - -typedef struct { - TfLiteMirrorPaddingMode mode; -} TfLiteMirrorPaddingParams; - -// Possible fused activation functions. 
-// TODO(aselle): rename to TfLiteActivation -typedef enum { - kTfLiteActNone = 0, - kTfLiteActRelu, - kTfLiteActReluN1To1, // min(max(-1, x), 1) - kTfLiteActRelu6, // min(max(0, x), 6) - kTfLiteActTanh, - kTfLiteActSignBit, - kTfLiteActSigmoid, -} TfLiteFusedActivation; - -typedef struct { - // Parameters for CONV_2D version 1. - TfLitePadding padding; - int stride_width; - int stride_height; - TfLiteFusedActivation activation; - - // Parameters for CONV_2D version 2. - // Note: Version 2 supports dilation values not equal to 1. - int dilation_width_factor; - int dilation_height_factor; -} TfLiteConvParams; - -typedef struct { - TfLitePadding padding; - int stride_width; - int stride_height; - int filter_width; - int filter_height; - TfLiteFusedActivation activation; - struct { - TfLitePaddingValues padding; - } computed; -} TfLitePoolParams; - -typedef struct { - // Parameters for DepthwiseConv version 1 or above. - TfLitePadding padding; - int stride_width; - int stride_height; - // `depth_multiplier` is redundant. It's used by CPU kernels in - // TensorFlow 2.0 or below, but ignored in versions above. - // - // The information can be deduced from the shape of input and the shape of - // weights. Since the TFLiteConverter toolchain doesn't support partially - // specified shapes, relying on `depth_multiplier` stops us from supporting - // graphs with dynamic shape tensors. - // - // Note: Some of the delegates (e.g. NNAPI, GPU) are still relying on this - // field. - int depth_multiplier; - TfLiteFusedActivation activation; - // Parameters for DepthwiseConv version 2 or above. - int dilation_width_factor; - int dilation_height_factor; -} TfLiteDepthwiseConvParams; - -typedef struct { - int rank; - TfLiteFusedActivation activation; - - // Parameter for SVDF version 4. - bool asymmetric_quantize_inputs; -} TfLiteSVDFParams; - -typedef struct { - TfLiteFusedActivation activation; - - // Parameter for RNN version 3. - bool asymmetric_quantize_inputs; -} TfLiteRNNParams; - -typedef struct { - bool time_major; - TfLiteFusedActivation activation; - - // Parameter for Sequence RNN version 3. - bool asymmetric_quantize_inputs; -} TfLiteSequenceRNNParams; - -typedef struct { - bool time_major; - TfLiteFusedActivation activation; - bool merge_outputs; - - // Parameter for Bidirectional RNN verison 3. - bool asymmetric_quantize_inputs; -} TfLiteBidirectionalSequenceRNNParams; - -typedef enum { - kTfLiteFullyConnectedWeightsFormatDefault = 0, - kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8 = 1, -} TfLiteFullyConnectedWeightsFormat; - -typedef struct { - // Parameters for FullyConnected version 1 or above. - TfLiteFusedActivation activation; - - // Parameters for FullyConnected version 2 or above. - TfLiteFullyConnectedWeightsFormat weights_format; - - // Parameters for FullyConnected version 5 or above. - // If set to true, then the number of dimensions in the input and the output - // tensors are the same. Furthermore, all but the last dimension of the input - // and output shapes will be equal. - bool keep_num_dims; - - // Parameters for FullyConnected version 7 or above. - // If set to true and the weights are quantized, then non constant inputs - // are quantized at evaluation time with asymmetric quantization. 
- bool asymmetric_quantize_inputs; -} TfLiteFullyConnectedParams; - -typedef enum { - kTfLiteLshProjectionUnknown = 0, - kTfLiteLshProjectionSparse = 1, - kTfLiteLshProjectionDense = 2, -} TfLiteLSHProjectionType; - -typedef struct { - TfLiteLSHProjectionType type; -} TfLiteLSHProjectionParams; - -typedef struct { - float beta; -} TfLiteSoftmaxParams; - -typedef struct { - int axis; - TfLiteFusedActivation activation; -} TfLiteConcatenationParams; - -typedef struct { - TfLiteFusedActivation activation; - // Parameter added for the version 4. - bool pot_scale_int16; -} TfLiteAddParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteSpaceToBatchNDParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteBatchToSpaceNDParams; - -typedef struct { - bool adj_x; - bool adj_y; - // Parameters for BatchMatMul version 4 or above. - // If set to true and the weights are quantized, then non constant inputs - // are quantized at evaluation time with asymmetric quantization. - bool asymmetric_quantize_inputs; -} TfLiteBatchMatMulParams; - -typedef struct { - TfLiteFusedActivation activation; -} TfLiteMulParams; - -typedef struct { - TfLiteFusedActivation activation; - // Parameter added for the version 5. - bool pot_scale_int16; -} TfLiteSubParams; - -typedef struct { - TfLiteFusedActivation activation; -} TfLiteDivParams; - -typedef struct { - TfLiteFusedActivation activation; -} TfLiteL2NormParams; - -typedef struct { - int radius; - float bias; - float alpha; - float beta; -} TfLiteLocalResponseNormParams; - -typedef enum { - kTfLiteLSTMFullKernel = 0, - kTfLiteLSTMBasicKernel -} TfLiteLSTMKernelType; - -typedef struct { - // Parameters for LSTM version 1. - TfLiteFusedActivation activation; - float cell_clip; - float proj_clip; - - // Parameters for LSTM version 2. - // kTfLiteLSTMBasicKernel is only supported in version 2 or above. - TfLiteLSTMKernelType kernel_type; - - // Parameters for LSTM version 4. - bool asymmetric_quantize_inputs; -} TfLiteLSTMParams; - -typedef struct { - // Parameters needed for the underlying LSTM. - TfLiteFusedActivation activation; - float cell_clip; - float proj_clip; - - // If set to true then the first dimension is time, otherwise batch. - bool time_major; - - // Parameter for unidirectional sequence RNN version 3. - bool asymmetric_quantize_inputs; -} TfLiteUnidirectionalSequenceLSTMParams; - -typedef struct { - // Parameters supported by version 1: - // Parameters inherited for the LSTM kernel. - TfLiteFusedActivation activation; - float cell_clip; - float proj_clip; - - // If true, store the outputs of both directions in the first output. - bool merge_outputs; - - // Parameters supported by version 2: - // If set to true then the first dimension is time, otherwise batch. - bool time_major; - - // Parameters supported by version 4: - // If set to true, then hybrid ops use asymmetric quantization for inputs. - bool asymmetric_quantize_inputs; -} TfLiteBidirectionalSequenceLSTMParams; - -typedef struct { - bool align_corners; - // half_pixel_centers assumes pixels are of half the actual dimensions, and - // yields more accurate resizes. Corresponds to the same argument for the - // original TensorFlow op in TF2.0. 
- bool half_pixel_centers; -} TfLiteResizeBilinearParams; - -typedef struct { - bool align_corners; - bool half_pixel_centers; -} TfLiteResizeNearestNeighborParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLitePadParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLitePadV2Params; - -typedef struct { - // TODO(ahentz): We can't have dynamic data in this struct, at least not yet. - // For now we will fix the maximum possible number of dimensions. - int shape[TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT]; - int num_dimensions; -} TfLiteReshapeParams; - -typedef struct { - int ngram_size; - int max_skip_size; - bool include_all_ngrams; -} TfLiteSkipGramParams; - -typedef struct { - int block_size; -} TfLiteSpaceToDepthParams; - -typedef struct { - int block_size; -} TfLiteDepthToSpaceParams; - -typedef struct { - TfLiteType in_data_type; - TfLiteType out_data_type; -} TfLiteCastParams; - -typedef enum { - kTfLiteCombinerTypeSum = 0, - kTfLiteCombinerTypeMean = 1, - kTfLiteCombinerTypeSqrtn = 2, -} TfLiteCombinerType; - -typedef struct { - TfLiteCombinerType combiner; -} TfLiteEmbeddingLookupSparseParams; - -typedef struct { - int axis; -} TfLiteGatherParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteTransposeParams; - -typedef struct { - bool keep_dims; -} TfLiteReducerParams; - -typedef struct { - int num_splits; -} TfLiteSplitParams; - -typedef struct { - int num_splits; -} TfLiteSplitVParams; - -typedef struct { - // TODO(ahentz): We can't have dynamic data in this struct, at least not yet. - // For now we will fix the maximum possible number of dimensions. - int squeeze_dims[8]; - int num_squeeze_dims; -} TfLiteSqueezeParams; - -typedef struct { - int begin_mask; - int end_mask; - int ellipsis_mask; - int new_axis_mask; - int shrink_axis_mask; -} TfLiteStridedSliceParams; - -typedef struct { - TfLiteType output_type; -} TfLiteArgMaxParams; - -typedef struct { - TfLiteType output_type; -} TfLiteArgMinParams; - -typedef struct { - TfLitePadding padding; - int stride_width; - int stride_height; -} TfLiteTransposeConvParams; - -typedef struct { - bool validate_indices; -} TfLiteSparseToDenseParams; - -typedef struct { - TfLiteType out_type; -} TfLiteShapeParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteRankParams; - -typedef struct { - // Parameters supported by version 1: - float min; - float max; - int num_bits; - - // Parameters supported by version 2: - bool narrow_range; -} TfLiteFakeQuantParams; - -typedef struct { - int values_count; - int axis; -} TfLitePackParams; - -typedef struct { - int axis; -} TfLiteOneHotParams; - -typedef struct { - int num; - int axis; -} TfLiteUnpackParams; - -typedef struct { - float alpha; -} TfLiteLeakyReluParams; - -typedef struct { - TfLiteType index_out_type; -} TfLiteUniqueParams; - -typedef struct { - int seq_dim; - int batch_dim; -} TfLiteReverseSequenceParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteMatrixDiagParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteMatrixSetDiagParams; - -typedef struct { - int then_subgraph_index; - int else_subgraph_index; -} TfLiteIfParams; - -typedef struct { - int cond_subgraph_index; - int body_subgraph_index; -} TfLiteWhileParams; - -typedef struct { - bool exclusive; - bool reverse; -} TfLiteCumsumParams; - -typedef struct { - int init_subgraph_index; -} TfLiteCallOnceParams; - -#ifdef __cplusplus -} // extern "C" -#endif // __cplusplus - -#endif // 
TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/c/c_api_types.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/c/c_api_types.h deleted file mode 100644 index e066e542..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/c/c_api_types.h +++ /dev/null @@ -1,92 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This file declares types used by the pure C inference API defined in c_api.h, -// some of which are also used in the C++ and C kernel and interpreter APIs. - -#ifndef TENSORFLOW_LITE_C_C_API_TYPES_H_ -#define TENSORFLOW_LITE_C_C_API_TYPES_H_ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -// Define TFL_CAPI_EXPORT macro to export a function properly with a shared -// library. -#ifdef SWIG -#define TFL_CAPI_EXPORT -#else -#if defined(_WIN32) -#ifdef TFL_COMPILE_LIBRARY -#define TFL_CAPI_EXPORT __declspec(dllexport) -#else -#define TFL_CAPI_EXPORT __declspec(dllimport) -#endif // TFL_COMPILE_LIBRARY -#else -#define TFL_CAPI_EXPORT __attribute__((visibility("default"))) -#endif // _WIN32 -#endif // SWIG - -typedef enum TfLiteStatus { - kTfLiteOk = 0, - - // Generally referring to an error in the runtime (i.e. interpreter) - kTfLiteError = 1, - - // Generally referring to an error from a TfLiteDelegate itself. - kTfLiteDelegateError = 2, - - // Generally referring to an error in applying a delegate due to - // incompatibility between runtime and delegate, e.g., this error is returned - // when trying to apply a TfLite delegate onto a model graph that's already - // immutable. - kTfLiteApplicationError = 3 -} TfLiteStatus; - -// Types supported by tensor -typedef enum { - kTfLiteNoType = 0, - kTfLiteFloat32 = 1, - kTfLiteInt32 = 2, - kTfLiteUInt8 = 3, - kTfLiteInt64 = 4, - kTfLiteString = 5, - kTfLiteBool = 6, - kTfLiteInt16 = 7, - kTfLiteComplex64 = 8, - kTfLiteInt8 = 9, - kTfLiteFloat16 = 10, - kTfLiteFloat64 = 11, - kTfLiteComplex128 = 12, - kTfLiteUInt64 = 13, -} TfLiteType; - -// Legacy. Will be deprecated in favor of TfLiteAffineQuantization. -// If per-layer quantization is specified this field will still be populated in -// addition to TfLiteAffineQuantization. -// Parameters for asymmetric quantization. 
Quantized values can be converted -// back to float using: -// real_value = scale * (quantized_value - zero_point) -typedef struct TfLiteQuantizationParams { - float scale; - int32_t zero_point; -} TfLiteQuantizationParams; - -#ifdef __cplusplus -} // extern C -#endif -#endif // TENSORFLOW_LITE_C_C_API_TYPES_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/c/common.c b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/c/common.c deleted file mode 100644 index 0b54fdac..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/c/common.c +++ /dev/null @@ -1,236 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/c/c_api_types.h" - -#ifndef TF_LITE_STATIC_MEMORY -#include -#include -#endif // TF_LITE_STATIC_MEMORY - -int TfLiteIntArrayGetSizeInBytes(int size) { - static TfLiteIntArray dummy; - return sizeof(dummy) + sizeof(dummy.data[0]) * size; -} - -int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) { - if (a == b) return 1; - if (a == NULL || b == NULL) return 0; - return TfLiteIntArrayEqualsArray(a, b->size, b->data); -} - -int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, - const int b_data[]) { - if (a == NULL) return (b_size == 0); - if (a->size != b_size) return 0; - int i = 0; - for (; i < a->size; i++) - if (a->data[i] != b_data[i]) return 0; - return 1; -} - -#ifndef TF_LITE_STATIC_MEMORY - -TfLiteIntArray* TfLiteIntArrayCreate(int size) { - TfLiteIntArray* ret = - (TfLiteIntArray*)malloc(TfLiteIntArrayGetSizeInBytes(size)); - ret->size = size; - return ret; -} - -TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src) { - if (!src) return NULL; - TfLiteIntArray* ret = TfLiteIntArrayCreate(src->size); - if (ret) { - memcpy(ret->data, src->data, src->size * sizeof(int)); - } - return ret; -} - -void TfLiteIntArrayFree(TfLiteIntArray* a) { free(a); } - -#endif // TF_LITE_STATIC_MEMORY - -int TfLiteFloatArrayGetSizeInBytes(int size) { - static TfLiteFloatArray dummy; - return sizeof(dummy) + sizeof(dummy.data[0]) * size; -} - -#ifndef TF_LITE_STATIC_MEMORY - -TfLiteFloatArray* TfLiteFloatArrayCreate(int size) { - TfLiteFloatArray* ret = - (TfLiteFloatArray*)malloc(TfLiteFloatArrayGetSizeInBytes(size)); - ret->size = size; - return ret; -} - -void TfLiteFloatArrayFree(TfLiteFloatArray* a) { free(a); } - -void TfLiteTensorDataFree(TfLiteTensor* t) { - if (t->allocation_type == kTfLiteDynamic || - t->allocation_type == kTfLitePersistentRo) { - free(t->data.raw); - } - t->data.raw = NULL; -} - -void TfLiteQuantizationFree(TfLiteQuantization* quantization) { - if (quantization->type == kTfLiteAffineQuantization) { - TfLiteAffineQuantization* q_params = - (TfLiteAffineQuantization*)(quantization->params); - if (q_params->scale) { - 
TfLiteFloatArrayFree(q_params->scale); - q_params->scale = NULL; - } - if (q_params->zero_point) { - TfLiteIntArrayFree(q_params->zero_point); - q_params->zero_point = NULL; - } - free(q_params); - } - quantization->params = NULL; - quantization->type = kTfLiteNoQuantization; -} - -void TfLiteSparsityFree(TfLiteSparsity* sparsity) { - if (sparsity == NULL) { - return; - } - - if (sparsity->traversal_order) { - TfLiteIntArrayFree(sparsity->traversal_order); - sparsity->traversal_order = NULL; - } - - if (sparsity->block_map) { - TfLiteIntArrayFree(sparsity->block_map); - sparsity->block_map = NULL; - } - - if (sparsity->dim_metadata) { - int i = 0; - for (; i < sparsity->dim_metadata_size; i++) { - TfLiteDimensionMetadata metadata = sparsity->dim_metadata[i]; - if (metadata.format == kTfLiteDimSparseCSR) { - TfLiteIntArrayFree(metadata.array_segments); - metadata.array_segments = NULL; - TfLiteIntArrayFree(metadata.array_indices); - metadata.array_indices = NULL; - } - } - free(sparsity->dim_metadata); - sparsity->dim_metadata = NULL; - } - - free(sparsity); -} - -void TfLiteTensorFree(TfLiteTensor* t) { - TfLiteTensorDataFree(t); - if (t->dims) TfLiteIntArrayFree(t->dims); - t->dims = NULL; - - if (t->dims_signature) { - TfLiteIntArrayFree((TfLiteIntArray *) t->dims_signature); - } - t->dims_signature = NULL; - - TfLiteQuantizationFree(&t->quantization); - TfLiteSparsityFree(t->sparsity); - t->sparsity = NULL; -} - -void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims, - TfLiteQuantizationParams quantization, char* buffer, - size_t size, TfLiteAllocationType allocation_type, - const void* allocation, bool is_variable, - TfLiteTensor* tensor) { - TfLiteTensorFree(tensor); - tensor->type = type; - tensor->name = name; - tensor->dims = dims; - tensor->params = quantization; - tensor->data.raw = buffer; - tensor->bytes = size; - tensor->allocation_type = allocation_type; - tensor->allocation = allocation; - tensor->is_variable = is_variable; - - tensor->quantization.type = kTfLiteNoQuantization; - tensor->quantization.params = NULL; -} - -void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) { - if (tensor->allocation_type != kTfLiteDynamic && - tensor->allocation_type != kTfLitePersistentRo) { - return; - } - // TODO(b/145340303): Tensor data should be aligned. 
- if (!tensor->data.raw) { - tensor->data.raw = malloc(num_bytes); - } else if (num_bytes > tensor->bytes) { - tensor->data.raw = realloc(tensor->data.raw, num_bytes); - } - tensor->bytes = num_bytes; -} -#endif // TF_LITE_STATIC_MEMORY - -const char* TfLiteTypeGetName(TfLiteType type) { - switch (type) { - case kTfLiteNoType: - return "NOTYPE"; - case kTfLiteFloat32: - return "FLOAT32"; - case kTfLiteInt16: - return "INT16"; - case kTfLiteInt32: - return "INT32"; - case kTfLiteUInt8: - return "UINT8"; - case kTfLiteInt8: - return "INT8"; - case kTfLiteInt64: - return "INT64"; - case kTfLiteUInt64: - return "UINT64"; - case kTfLiteBool: - return "BOOL"; - case kTfLiteComplex64: - return "COMPLEX64"; - case kTfLiteComplex128: - return "COMPLEX128"; - case kTfLiteString: - return "STRING"; - case kTfLiteFloat16: - return "FLOAT16"; - case kTfLiteFloat64: - return "FLOAT64"; - } - return "Unknown type"; -} - -TfLiteDelegate TfLiteDelegateCreate() { - TfLiteDelegate d = { - .data_ = NULL, - .Prepare = NULL, - .CopyFromBufferHandle = NULL, - .CopyToBufferHandle = NULL, - .FreeBufferHandle = NULL, - .flags = kTfLiteDelegateFlagsNone, - }; - return d; -} diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/c/common.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/c/common.h deleted file mode 100644 index 5398fbc6..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/c/common.h +++ /dev/null @@ -1,913 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This file defines common C types and APIs for implementing operations, -// delegates and other constructs in TensorFlow Lite. The actual operations and -// delegates can be defined using C++, but the interface between the interpreter -// and the operations are C. -// -// Summary of abstractions -// TF_LITE_ENSURE - Self-sufficient error checking -// TfLiteStatus - Status reporting -// TfLiteIntArray - stores tensor shapes (dims), -// TfLiteContext - allows an op to access the tensors -// TfLiteTensor - tensor (a multidimensional array) -// TfLiteNode - a single node or operation -// TfLiteRegistration - the implementation of a conceptual operation. -// TfLiteDelegate - allows delegation of nodes to alternative backends. -// -// Some abstractions in this file are created and managed by Interpreter. -// -// NOTE: The order of values in these structs are "semi-ABI stable". New values -// should be added only to the end of structs and never reordered. - -#ifndef TENSORFLOW_LITE_C_COMMON_H_ -#define TENSORFLOW_LITE_C_COMMON_H_ - -#include -#include -#include - -#include "tensorflow/lite/c/c_api_types.h" // IWYU pragma: export - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -// The list of external context types known to TF Lite. 
This list exists solely -// to avoid conflicts and to ensure ops can share the external contexts they -// need. Access to the external contexts is controlled by one of the -// corresponding support files. -typedef enum TfLiteExternalContextType { - kTfLiteEigenContext = 0, // include eigen_support.h to use. - kTfLiteGemmLowpContext = 1, // include gemm_support.h to use. - kTfLiteEdgeTpuContext = 2, // Placeholder for Edge TPU support. - kTfLiteCpuBackendContext = 3, // include cpu_backend_context.h to use. - kTfLiteMaxExternalContexts = 4 -} TfLiteExternalContextType; - -// Forward declare so dependent structs and methods can reference these types -// prior to the struct definitions. -struct TfLiteContext; -struct TfLiteDelegate; -struct TfLiteRegistration; - -// An external context is a collection of information unrelated to the TF Lite -// framework, but useful to a subset of the ops. TF Lite knows very little -// about the actual contexts, but it keeps a list of them, and is able to -// refresh them if configurations like the number of recommended threads -// change. -typedef struct TfLiteExternalContext { - TfLiteExternalContextType type; - TfLiteStatus (*Refresh)(struct TfLiteContext* context); -} TfLiteExternalContext; - -#define kTfLiteOptionalTensor (-1) - -// Fixed size list of integers. Used for dimensions and inputs/outputs tensor -// indices -typedef struct TfLiteIntArray { - int size; -// gcc 6.1+ have a bug where flexible members aren't properly handled -// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c -#if (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ - __GNUC_MINOR__ >= 1) || \ - defined(HEXAGON) || (__clang_major__ == 7 && __clang_minor__ == 1) - int data[0]; -#else - int data[]; -#endif -} TfLiteIntArray; - -// Given the size (number of elements) in a TfLiteIntArray, calculate its size -// in bytes. -int TfLiteIntArrayGetSizeInBytes(int size); - -#ifndef TF_LITE_STATIC_MEMORY -// Create a array of a given `size` (uninitialized entries). -// This returns a pointer, that you must free using TfLiteIntArrayFree(). -TfLiteIntArray* TfLiteIntArrayCreate(int size); -#endif - -// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise. -int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b); - -// Check if an intarray equals an array. Returns 1 if equals, 0 otherwise. -int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, - const int b_data[]); - -#ifndef TF_LITE_STATIC_MEMORY -// Create a copy of an array passed as `src`. -// You are expected to free memory with TfLiteIntArrayFree -TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src); - -// Free memory of array `a`. -void TfLiteIntArrayFree(TfLiteIntArray* a); -#endif // TF_LITE_STATIC_MEMORY - -// Fixed size list of floats. Used for per-channel quantization. -typedef struct TfLiteFloatArray { - int size; -// gcc 6.1+ have a bug where flexible members aren't properly handled -// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c -// This also applies to the toolchain used for Qualcomm Hexagon DSPs. -#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ - __GNUC_MINOR__ >= 1 - float data[0]; -#else - float data[]; -#endif -} TfLiteFloatArray; - -// Given the size (number of elements) in a TfLiteFloatArray, calculate its size -// in bytes. -int TfLiteFloatArrayGetSizeInBytes(int size); - -#ifndef TF_LITE_STATIC_MEMORY -// Create a array of a given `size` (uninitialized entries). 
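The TfLiteIntArray helpers declared above (and implemented in the removed common.c) are heap-backed and are only compiled when TF_LITE_STATIC_MEMORY is not defined. A minimal usage sketch under that assumption; the wrapper function name is illustrative only:

#include "tensorflow/lite/c/common.h"

/* Build a 4-D shape, copy it, compare the two arrays, then free both.
   Requires a build without TF_LITE_STATIC_MEMORY. */
static void ExampleIntArrayUsage(void) {
    TfLiteIntArray* dims = TfLiteIntArrayCreate(4);      /* entries start uninitialized */
    dims->data[0] = 1; dims->data[1] = 28; dims->data[2] = 28; dims->data[3] = 1;
    TfLiteIntArray* copy = TfLiteIntArrayCopy(dims);
    int same = TfLiteIntArrayEqual(dims, copy);           /* returns 1 here */
    (void)same;
    TfLiteIntArrayFree(copy);
    TfLiteIntArrayFree(dims);
}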
-// This returns a pointer, that you must free using TfLiteFloatArrayFree(). -TfLiteFloatArray* TfLiteFloatArrayCreate(int size); - -// Free memory of array `a`. -void TfLiteFloatArrayFree(TfLiteFloatArray* a); -#endif // TF_LITE_STATIC_MEMORY - -// Since we must not depend on any libraries, define a minimal subset of -// error macros while avoiding names that have pre-conceived meanings like -// CHECK and check. - -// Try to make all reporting calls through TF_LITE_KERNEL_LOG rather than -// calling the context->ReportError function directly, so that message strings -// can be stripped out if the binary size needs to be severely optimized. -#ifndef TF_LITE_STRIP_ERROR_STRINGS -#define TF_LITE_KERNEL_LOG(context, ...) \ - do { \ - (context)->ReportError((context), __VA_ARGS__); \ - } while (false) - -#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) \ - do { \ - if ((context) != nullptr) { \ - (context)->ReportError((context), __VA_ARGS__); \ - } \ - } while (false) -#else // TF_LITE_STRIP_ERROR_STRINGS -#define TF_LITE_KERNEL_LOG(context, ...) -#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) -#endif // TF_LITE_STRIP_ERROR_STRINGS - -// Check whether value is true, and if not return kTfLiteError from -// the current function (and report the error string msg). -#define TF_LITE_ENSURE_MSG(context, value, msg) \ - do { \ - if (!(value)) { \ - TF_LITE_KERNEL_LOG((context), __FILE__ " " msg); \ - return kTfLiteError; \ - } \ - } while (0) - -// Check whether the value `a` is true, and if not return kTfLiteError from -// the current function, while also reporting the location of the error. -#define TF_LITE_ENSURE(context, a) \ - do { \ - if (!(a)) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s was not true.", __FILE__, \ - __LINE__, #a); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_STATUS(a) \ - do { \ - const TfLiteStatus s = (a); \ - if (s != kTfLiteOk) { \ - return s; \ - } \ - } while (0) - -// Check whether the value `a == b` is true, and if not return kTfLiteError from -// the current function, while also reporting the location of the error. -// `a` and `b` may be evaluated more than once, so no side effects or -// extremely expensive computations should be done. -// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes. -#define TF_LITE_ENSURE_EQ(context, a, b) \ - do { \ - if ((a) != (b)) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%d != %d)", __FILE__, \ - __LINE__, #a, #b, (a), (b)); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_TYPES_EQ(context, a, b) \ - do { \ - if ((a) != (b)) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%s != %s)", __FILE__, \ - __LINE__, #a, #b, TfLiteTypeGetName(a), \ - TfLiteTypeGetName(b)); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_NEAR(context, a, b, epsilon) \ - do { \ - auto delta = ((a) > (b)) ? ((a) - (b)) : ((b) - (a)); \ - if (delta > epsilon) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s not near %s (%f != %f)", \ - __FILE__, __LINE__, #a, #b, static_cast(a), \ - static_cast(b)); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_OK(context, status) \ - do { \ - const TfLiteStatus s = (status); \ - if ((s) != kTfLiteOk) { \ - return s; \ - } \ - } while (0) - -// Single-precision complex data type compatible with the C99 definition. -typedef struct TfLiteComplex64 { - float re, im; // real and imaginary parts, respectively. -} TfLiteComplex64; - -// Double-precision complex data type compatible with the C99 definition. 
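The TF_LITE_ENSURE_* macros above return kTfLiteError from the enclosing function on failure, so they only make sense inside functions that return TfLiteStatus. A hedged sketch of a hypothetical kernel Prepare that combines them with ResizeTensor (assumes a non-static-memory build so TfLiteIntArrayCopy is available):

#include "tensorflow/lite/c/common.h"

/* Illustrative Prepare: exactly one float32 input, one output resized to match it. */
static TfLiteStatus ExamplePrepare(TfLiteContext* context, TfLiteNode* node) {
    TF_LITE_ENSURE_EQ(context, node->inputs->size, 1);
    TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
    TfLiteTensor* input  = &context->tensors[node->inputs->data[0]];
    TfLiteTensor* output = &context->tensors[node->outputs->data[0]];
    TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
    /* ResizeTensor takes ownership of the shape array passed to it. */
    return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims));
}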
-typedef struct TfLiteComplex128 { - double re, im; // real and imaginary parts, respectively. -} TfLiteComplex128; - -// Half precision data type compatible with the C99 definition. -typedef struct TfLiteFloat16 { - uint16_t data; -} TfLiteFloat16; - -// Return the name of a given type, for error reporting purposes. -const char* TfLiteTypeGetName(TfLiteType type); - -// SupportedQuantizationTypes. -typedef enum TfLiteQuantizationType { - // No quantization. - kTfLiteNoQuantization = 0, - // Affine quantization (with support for per-channel quantization). - // Corresponds to TfLiteAffineQuantization. - kTfLiteAffineQuantization = 1, -} TfLiteQuantizationType; - -// Structure specifying the quantization used by the tensor, if-any. -typedef struct TfLiteQuantization { - // The type of quantization held by params. - TfLiteQuantizationType type; - // Holds an optional reference to a quantization param structure. The actual - // type depends on the value of the `type` field (see the comment there for - // the values and corresponding types). - void* params; -} TfLiteQuantization; - -// Parameters for asymmetric quantization across a dimension (i.e per output -// channel quantization). -// quantized_dimension specifies which dimension the scales and zero_points -// correspond to. -// For a particular value in quantized_dimension, quantized values can be -// converted back to float using: -// real_value = scale * (quantized_value - zero_point) -typedef struct TfLiteAffineQuantization { - TfLiteFloatArray* scale; - TfLiteIntArray* zero_point; - int32_t quantized_dimension; -} TfLiteAffineQuantization; - -/* A union of pointers that points to memory for a given tensor. */ -typedef union TfLitePtrUnion { - /* Do not access these members directly, if possible, use - * GetTensorData(tensor) instead, otherwise only access .data, as other - * members are deprecated. */ - int32_t* i32; - int64_t* i64; - uint64_t* u64; - float* f; - TfLiteFloat16* f16; - double* f64; - char* raw; - const char* raw_const; - uint8_t* uint8; - bool* b; - int16_t* i16; - TfLiteComplex64* c64; - TfLiteComplex128* c128; - int8_t* int8; - /* Only use this member. */ - void* data; -} TfLitePtrUnion; - -// Memory allocation strategies. -// * kTfLiteMmapRo: Read-only memory-mapped data, or data externally allocated. -// * kTfLiteArenaRw: Arena allocated with no guarantees about persistence, -// and available during eval. -// * kTfLiteArenaRwPersistent: Arena allocated but persistent across eval, and -// only available during eval. -// * kTfLiteDynamic: Allocated during eval, or for string tensors. -// * kTfLitePersistentRo: Allocated and populated during prepare. This is -// useful for tensors that can be computed during prepare and treated -// as constant inputs for downstream ops (also in prepare). -// * kTfLiteCustom: Custom memory allocation provided by the user. See -// TfLiteCustomAllocation below. -typedef enum TfLiteAllocationType { - kTfLiteMemNone = 0, - kTfLiteMmapRo, - kTfLiteArenaRw, - kTfLiteArenaRwPersistent, - kTfLiteDynamic, - kTfLitePersistentRo, - kTfLiteCustom, -} TfLiteAllocationType; - -// The delegates should use zero or positive integers to represent handles. -// -1 is reserved from unallocated status. -typedef int TfLiteBufferHandle; -enum { - kTfLiteNullBufferHandle = -1, -}; - -// Storage format of each dimension in a sparse tensor. -typedef enum TfLiteDimensionType { - kTfLiteDimDense = 0, - kTfLiteDimSparseCSR, -} TfLiteDimensionType; - -// Metadata to encode each dimension in a sparse tensor. 
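Both quantization structs above encode the same affine rule, real_value = scale * (quantized_value - zero_point); TfLiteAffineQuantization simply carries one scale/zero_point pair per slice of quantized_dimension. A minimal per-tensor sketch, assuming the legacy TfLiteQuantizationParams shown here (the helper name is hypothetical):

#include <stdint.h>
#include "tensorflow/lite/c/common.h"

/* Dequantize a single uint8 value using per-tensor parameters. */
static float DequantizeUInt8(uint8_t q, TfLiteQuantizationParams params) {
    return params.scale * ((int32_t)q - params.zero_point);
}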
-typedef struct TfLiteDimensionMetadata { - TfLiteDimensionType format; - int dense_size; - TfLiteIntArray* array_segments; - TfLiteIntArray* array_indices; -} TfLiteDimensionMetadata; - -// Parameters used to encode a sparse tensor. For detailed explanation of each -// field please refer to lite/schema/schema.fbs. -typedef struct TfLiteSparsity { - TfLiteIntArray* traversal_order; - TfLiteIntArray* block_map; - TfLiteDimensionMetadata* dim_metadata; - int dim_metadata_size; -} TfLiteSparsity; - -// Defines a custom memory allocation not owned by the runtime. -// `data` should be aligned to kDefaultTensorAlignment defined in -// lite/util.h. (Currently 64 bytes) -// NOTE: See Interpreter.SetCustomAllocationForTensor for details on usage. -typedef struct TfLiteCustomAllocation { - void* data; - size_t bytes; -} TfLiteCustomAllocation; - -// A tensor in the interpreter system which is a wrapper around a buffer of -// data including a dimensionality (or NULL if not currently defined). -#ifndef TF_LITE_STATIC_MEMORY -typedef struct TfLiteTensor { - // The data type specification for data stored in `data`. This affects - // what member of `data` union should be used. - TfLiteType type; - // A union of data pointers. The appropriate type should be used for a typed - // tensor based on `type`. - TfLitePtrUnion data; - // A pointer to a structure representing the dimensionality interpretation - // that the buffer should have. NOTE: the product of elements of `dims` - // and the element datatype size should be equal to `bytes` below. - TfLiteIntArray* dims; - // Quantization information. - TfLiteQuantizationParams params; - // How memory is mapped - // kTfLiteMmapRo: Memory mapped read only. - // i.e. weights - // kTfLiteArenaRw: Arena allocated read write memory - // (i.e. temporaries, outputs). - TfLiteAllocationType allocation_type; - // The number of bytes required to store the data of this Tensor. I.e. - // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if - // type is kTfLiteFloat32 and dims = {3, 2} then - // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. - size_t bytes; - - // An opaque pointer to a tflite::MMapAllocation - const void* allocation; - - // Null-terminated name of this tensor. - const char* name; - - // The delegate which knows how to handle `buffer_handle`. - // WARNING: This is an experimental interface that is subject to change. - struct TfLiteDelegate* delegate; - - // An integer buffer handle that can be handled by `delegate`. - // The value is valid only when delegate is not null. - // WARNING: This is an experimental interface that is subject to change. - TfLiteBufferHandle buffer_handle; - - // If the delegate uses its own buffer (e.g. GPU memory), the delegate is - // responsible to set data_is_stale to true. - // `delegate->CopyFromBufferHandle` can be called to copy the data from - // delegate buffer. - // WARNING: This is an // experimental interface that is subject to change. - bool data_is_stale; - - // True if the tensor is a variable. - bool is_variable; - - // Quantization information. Replaces params field above. - TfLiteQuantization quantization; - - // Parameters used to encode a sparse tensor. - // This is optional. The field is NULL if a tensor is dense. - // WARNING: This is an experimental interface that is subject to change. - TfLiteSparsity* sparsity; - - // Optional. Encodes shapes with unknown dimensions with -1. This field is - // only populated when unknown dimensions exist in a read-write tensor (i.e. 
- // an input or output tensor). (e.g. `dims` contains [1, 1, 1, 3] and - // `dims_signature` contains [1, -1, -1, 3]). - const TfLiteIntArray* dims_signature; -} TfLiteTensor; - -// A structure representing an instance of a node. -// This structure only exhibits the inputs, outputs and user defined data, not -// other features like the type. -typedef struct TfLiteNode { - // Inputs to this node expressed as indices into the simulator's tensors. - TfLiteIntArray* inputs; - - // Outputs to this node expressed as indices into the simulator's tensors. - TfLiteIntArray* outputs; - - // intermediate tensors to this node expressed as indices into the simulator's - // tensors. - TfLiteIntArray* intermediates; - - // Temporary tensors uses during the computations. This usually contains no - // tensors, but ops are allowed to change that if they need scratch space of - // any sort. - TfLiteIntArray* temporaries; - - // Opaque data provided by the node implementer through `Registration.init`. - void* user_data; - - // Opaque data provided to the node if the node is a builtin. This is usually - // a structure defined in builtin_op_data.h - void* builtin_data; - - // Custom initial data. This is the opaque data provided in the flatbuffer. - // WARNING: This is an experimental interface that is subject to change. - const void* custom_initial_data; - int custom_initial_data_size; - - // The pointer to the delegate. This is non-null only when the node is - // created by calling `interpreter.ModifyGraphWithDelegate`. - // WARNING: This is an experimental interface that is subject to change. - struct TfLiteDelegate* delegate; -} TfLiteNode; -#else // defined(TF_LITE_STATIC_MEMORY)? -// NOTE: This flag is opt-in only at compile time. -// -// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct -// contains only the minimum fields required to initialize and prepare a micro -// inference graph. The fields in this struct have been ordered from -// largest-to-smallest for optimal struct sizeof. -// -// This struct does not use: -// - allocation -// - buffer_handle -// - data_is_stale -// - delegate -// - dims_signature -// - name -// - sparsity -typedef struct TfLiteTensor { - // TODO(b/155784997): Consider consolidating these quantization fields: - // Quantization information. Replaces params field above. - TfLiteQuantization quantization; - - // Quantization information. - TfLiteQuantizationParams params; - - // A union of data pointers. The appropriate type should be used for a typed - // tensor based on `type`. - TfLitePtrUnion data; - - // A pointer to a structure representing the dimensionality interpretation - // that the buffer should have. NOTE: the product of elements of `dims` - // and the element datatype size should be equal to `bytes` below. - TfLiteIntArray* dims; - - // The number of bytes required to store the data of this Tensor. I.e. - // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if - // type is kTfLiteFloat32 and dims = {3, 2} then - // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. - size_t bytes; - - // The data type specification for data stored in `data`. This affects - // what member of `data` union should be used. - TfLiteType type; - - // How memory is mapped - // kTfLiteMmapRo: Memory mapped read only. - // i.e. weights - // kTfLiteArenaRw: Arena allocated read write memory - // (i.e. temporaries, outputs). - TfLiteAllocationType allocation_type; - - // True if the tensor is a variable. 
- bool is_variable; -} TfLiteTensor; - -// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains -// only the minimum fields required to represent a node. -// -// This struct does not use: -// - delegate -// - intermediates -// - temporaries -typedef struct TfLiteNode { - // Inputs to this node expressed as indices into the simulator's tensors. - TfLiteIntArray* inputs; - - // Outputs to this node expressed as indices into the simulator's tensors. - TfLiteIntArray* outputs; - - // Opaque data provided by the node implementer through `Registration.init`. - void* user_data; - - // Opaque data provided to the node if the node is a builtin. This is usually - // a structure defined in builtin_op_data.h - void* builtin_data; - - // Custom initial data. This is the opaque data provided in the flatbuffer. - // WARNING: This is an experimental interface that is subject to change. - const void* custom_initial_data; - int custom_initial_data_size; -} TfLiteNode; -#endif // TF_LITE_STATIC_MEMORY - -// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount -// of information required for a kernel to run during TfLiteRegistration::Eval. -// TODO(b/160955687): Move this field into TF_LITE_STATIC_MEMORY when TFLM -// builds with this flag by default internally. -typedef struct TfLiteEvalTensor { - // A union of data pointers. The appropriate type should be used for a typed - // tensor based on `type`. - TfLitePtrUnion data; - - // A pointer to a structure representing the dimensionality interpretation - // that the buffer should have. - TfLiteIntArray* dims; - - // The data type specification for data stored in `data`. This affects - // what member of `data` union should be used. - TfLiteType type; -} TfLiteEvalTensor; - -#ifndef TF_LITE_STATIC_MEMORY -// Free data memory of tensor `t`. -void TfLiteTensorDataFree(TfLiteTensor* t); - -// Free quantization data. -void TfLiteQuantizationFree(TfLiteQuantization* quantization); - -// Free sparsity parameters. -void TfLiteSparsityFree(TfLiteSparsity* sparsity); - -// Free memory of tensor `t`. -void TfLiteTensorFree(TfLiteTensor* t); - -// Set all of a tensor's fields (and free any previously allocated data). -void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims, - TfLiteQuantizationParams quantization, char* buffer, - size_t size, TfLiteAllocationType allocation_type, - const void* allocation, bool is_variable, - TfLiteTensor* tensor); - -// Resize the allocated data of a (dynamic) tensor. Tensors with allocation -// types other than kTfLiteDynamic will be ignored. -void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor); -#endif // TF_LITE_STATIC_MEMORY - -// WARNING: This is an experimental interface that is subject to change. -// -// Currently, TfLiteDelegateParams has to be allocated in a way that it's -// trivially destructable. It will be stored as `builtin_data` field in -// `TfLiteNode` of the delegate node. -// -// See also the `CreateDelegateParams` function in `interpreter.cc` details. -typedef struct TfLiteDelegateParams { - struct TfLiteDelegate* delegate; - TfLiteIntArray* nodes_to_replace; - TfLiteIntArray* input_tensors; - TfLiteIntArray* output_tensors; -} TfLiteDelegateParams; - -typedef struct TfLiteContext { - // Number of tensors in the context. - size_t tensors_size; - - // The execution plan contains a list of the node indices in execution - // order. execution_plan->size is the current number of nodes. 
And, - // execution_plan->data[0] is the first node that needs to be run. - // TfLiteDelegates can traverse the current execution plan by iterating - // through each member of this array and using GetNodeAndRegistration() to - // access details about a node. i.e. - // TfLiteIntArray* execution_plan; - // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan)); - // for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) { - // int node_index = execution_plan->data[exec_index]; - // TfLiteNode* node; - // TfLiteRegistration* reg; - // context->GetNodeAndRegistration(context, node_index, &node, ®); - // } - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context, - TfLiteIntArray** execution_plan); - - // An array of tensors in the interpreter context (of length `tensors_size`) - TfLiteTensor* tensors; - - // opaque full context ptr (an opaque c++ data structure) - void* impl_; - - // Request memory pointer be resized. Updates dimensions on the tensor. - // NOTE: ResizeTensor takes ownership of newSize. - TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor, - TfLiteIntArray* new_size); - // Request that an error be reported with format string msg. - void (*ReportError)(struct TfLiteContext*, const char* msg, ...); - - // Add `tensors_to_add` tensors, preserving pre-existing Tensor entries. If - // non-null, the value pointed to by `first_new_tensor_index` will be set to - // the index of the first new tensor. - TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add, - int* first_new_tensor_index); - - // Get a Tensor node by node_index. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*GetNodeAndRegistration)( - struct TfLiteContext*, int node_index, TfLiteNode** node, - struct TfLiteRegistration** registration); - - // Replace ops with one or more stub delegate operations. This function - // does not take ownership of `nodes_to_replace`. - TfLiteStatus (*ReplaceNodeSubsetsWithDelegateKernels)( - struct TfLiteContext*, struct TfLiteRegistration registration, - const TfLiteIntArray* nodes_to_replace, struct TfLiteDelegate* delegate); - - // Number of threads that are recommended to subsystems like gemmlowp and - // eigen. - int recommended_num_threads; - - // Access external contexts by type. - // WARNING: This is an experimental interface that is subject to change. - TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*, - TfLiteExternalContextType); - // Set the value of a external context. Does not take ownership of the - // pointer. - // WARNING: This is an experimental interface that is subject to change. - void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType, - TfLiteExternalContext*); - - // Flag for allowing float16 precision for FP32 calculation. - // default: false. - // WARNING: This is an experimental API and subject to change. - bool allow_fp32_relax_to_fp16; - - // Pointer to the op-level profiler, if set; nullptr otherwise. - void* profiler; - - // Allocate persistent buffer which has the same life time as the interpreter. - // Returns nullptr on failure. - // The memory is allocated from heap for TFL, and from tail in TFLM. - // This method is only available in Init or Prepare stage. - // WARNING: This is an experimental interface that is subject to change. 
- void* (*AllocatePersistentBuffer)(struct TfLiteContext* ctx, size_t bytes); - - // Allocate a buffer which will be deallocated right after invoke phase. - // The memory is allocated from heap in TFL, and from volatile arena in TFLM. - // This method is only available in invoke stage. - // NOTE: If possible use RequestScratchBufferInArena method to avoid memory - // allocation during inference time. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*AllocateBufferForEval)(struct TfLiteContext* ctx, size_t bytes, - void** ptr); - - // Request a scratch buffer in the arena through static memory planning. - // This method is only available in Prepare stage and the buffer is allocated - // by the interpreter between Prepare and Eval stage. In Eval stage, - // GetScratchBuffer API can be used to fetch the address. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*RequestScratchBufferInArena)(struct TfLiteContext* ctx, - size_t bytes, int* buffer_idx); - - // Get the scratch buffer pointer. - // This method is only available in Eval stage. - // WARNING: This is an experimental interface that is subject to change. - void* (*GetScratchBuffer)(struct TfLiteContext* ctx, int buffer_idx); - - // Resize the memory pointer of the `tensor`. This method behaves the same as - // `ResizeTensor`, except that it makes a copy of the shape array internally - // so the shape array could be deallocated right afterwards. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*ResizeTensorExplicit)(struct TfLiteContext* ctx, - TfLiteTensor* tensor, int dims, - const int* shape); - - // This method provides a preview of post-delegation partitioning. Each - // TfLiteDelegateParams in the referenced array corresponds to one instance of - // the delegate kernel. - // Example usage: - // - // TfLiteIntArray* nodes_to_replace = ...; - // TfLiteDelegateParams* params_array; - // int num_partitions = 0; - // TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning( - // context, delegate, nodes_to_replace, ¶ms_array, &num_partitions)); - // for (int idx = 0; idx < num_partitions; idx++) { - // const auto& partition_params = params_array[idx]; - // ... - // } - // - // NOTE: The context owns the memory referenced by partition_params_array. It - // will be cleared with another call to PreviewDelegateParitioning, or after - // TfLiteDelegateParams::Prepare returns. - // - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*PreviewDelegatePartitioning)( - struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace, - TfLiteDelegateParams** partition_params_array, int* num_partitions); - - // Returns a TfLiteTensor struct for a given index. - // WARNING: This is an experimental interface that is subject to change. - // WARNING: This method may not be available on all platforms. - TfLiteTensor* (*GetTensor)(const struct TfLiteContext* context, - int tensor_idx); - - // Returns a TfLiteEvalTensor struct for a given index. - // WARNING: This is an experimental interface that is subject to change. - // WARNING: This method may not be available on all platforms. - TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context, - int tensor_idx); -} TfLiteContext; - -typedef struct TfLiteRegistration { - // Initializes the op from serialized data. - // If a built-in op: - // `buffer` is the op's params data (TfLiteLSTMParams*). - // `length` is zero. 
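The scratch-buffer callbacks above are stage-bound: RequestScratchBufferInArena may only be called during Prepare, and GetScratchBuffer only during Eval, with the returned index carried between the two. A sketch of that pattern, assuming `init` has already stored a per-op struct in node->user_data (all names are illustrative):

#include "tensorflow/lite/c/common.h"

typedef struct { int scratch_idx; } ExampleOpData;   /* hypothetical per-op state */

static TfLiteStatus ExampleScratchPrepare(TfLiteContext* ctx, TfLiteNode* node) {
    ExampleOpData* data = (ExampleOpData*)node->user_data;
    /* Reserve 1 KiB of arena scratch space; it is resolved to an address at Eval time. */
    return ctx->RequestScratchBufferInArena(ctx, 1024, &data->scratch_idx);
}

static TfLiteStatus ExampleScratchEval(TfLiteContext* ctx, TfLiteNode* node) {
    ExampleOpData* data = (ExampleOpData*)node->user_data;
    void* scratch = ctx->GetScratchBuffer(ctx, data->scratch_idx);
    return (scratch != NULL) ? kTfLiteOk : kTfLiteError;
}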
- // If custom op: - // `buffer` is the op's `custom_options`. - // `length` is the size of the buffer. - // - // Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer - // or an instance of a struct). - // - // The returned pointer will be stored with the node in the `user_data` field, - // accessible within prepare and invoke functions below. - // NOTE: if the data is already in the desired format, simply implement this - // function to return `nullptr` and implement the free function to be a no-op. - void* (*init)(TfLiteContext* context, const char* buffer, size_t length); - - // The pointer `buffer` is the data previously returned by an init invocation. - void (*free)(TfLiteContext* context, void* buffer); - - // prepare is called when the inputs this node depends on have been resized. - // context->ResizeTensor() can be called to request output tensors to be - // resized. - // - // Returns kTfLiteOk on success. - TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node); - - // Execute the node (should read node->inputs and output to node->outputs). - // Returns kTfLiteOk on success. - TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node); - - // profiling_string is called during summarization of profiling information - // in order to group executions together. Providing a value here will cause a - // given op to appear multiple times is the profiling report. This is - // particularly useful for custom ops that can perform significantly - // different calculations depending on their `user-data`. - const char* (*profiling_string)(const TfLiteContext* context, - const TfLiteNode* node); - - // Builtin codes. If this kernel refers to a builtin this is the code - // of the builtin. This is so we can do marshaling to other frameworks like - // NN API. - // Note: It is the responsibility of the registration binder to set this - // properly. - int32_t builtin_code; - - // Custom op name. If the op is a builtin, this will be null. - // Note: It is the responsibility of the registration binder to set this - // properly. - // WARNING: This is an experimental interface that is subject to change. - const char* custom_name; - - // The version of the op. - // Note: It is the responsibility of the registration binder to set this - // properly. - int version; -} TfLiteRegistration; - -// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the -// values should be 1, 2, 4, 8, ...etc. -typedef enum TfLiteDelegateFlags { - kTfLiteDelegateFlagsNone = 0, - // The flag is set if the delegate can handle dynamic sized tensors. - // For example, the output shape of a `Resize` op with non-constant shape - // can only be inferred when the op is invoked. - // In this case, the Delegate is responsible for calling - // `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling - // `ResizeTensor` when invoking the op. - // - // If the delegate isn't capable to handle dynamic tensors, this flag need - // to be set to false. - kTfLiteDelegateFlagsAllowDynamicTensors = 1, - - // This flag can be used by delegates (that allow dynamic tensors) to ensure - // applicable tensor shapes are automatically propagated in the case of tensor - // resizing. - // This means that non-dynamic (allocation_type != kTfLiteDynamic) I/O tensors - // of a delegate kernel will have correct shapes before its Prepare() method - // is called. The runtime leverages TFLite builtin ops in the original - // execution plan to propagate shapes. 
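TfLiteRegistration is what binds the init/free/prepare/invoke hooks described above to a single op. A minimal sketch of registering a stateless custom op; the op name and functions are illustrative and not part of this patch:

#include "tensorflow/lite/c/common.h"

static TfLiteStatus NoOpPrepare(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; }
static TfLiteStatus NoOpInvoke(TfLiteContext* context, TfLiteNode* node)  { return kTfLiteOk; }

/* init/free stay NULL because this op keeps no per-instance state;
   builtin_code stays 0 because the op is identified by custom_name instead. */
TfLiteRegistration* Register_EXAMPLE_NOOP(void) {
    static TfLiteRegistration r = {
        .prepare = NoOpPrepare,
        .invoke = NoOpInvoke,
        .custom_name = "EXAMPLE_NOOP",
        .version = 1,
    };
    return &r;
}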
- // - // A few points to note: - // 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is - // false, this one is redundant since the delegate kernels are re-initialized - // every time tensors are resized. - // 2. Enabling this flag adds some overhead to AllocateTensors(), since extra - // work is required to prepare the original execution plan. - // 3. This flag requires that the original execution plan only have ops with - // valid registrations (and not 'dummy' custom ops like with Flex). - // WARNING: This feature is experimental and subject to change. - kTfLiteDelegateFlagsRequirePropagatedShapes = 2 -} TfLiteDelegateFlags; - -// WARNING: This is an experimental interface that is subject to change. -typedef struct TfLiteDelegate { - // Data that delegate needs to identify itself. This data is owned by the - // delegate. The delegate is owned in the user code, so the delegate is - // responsible for doing this when it is destroyed. - void* data_; - - // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the - // delegate a view of the current graph through TfLiteContext*. It typically - // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels() - // to ask the TensorFlow lite runtime to create macro-nodes to represent - // delegated subgraphs of the original graph. - TfLiteStatus (*Prepare)(TfLiteContext* context, - struct TfLiteDelegate* delegate); - - // Copy the data from delegate buffer handle into raw memory of the given - // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as - // long as it follows the rules for kTfLiteDynamic tensors, in which case this - // cannot be null. - TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context, - struct TfLiteDelegate* delegate, - TfLiteBufferHandle buffer_handle, - TfLiteTensor* tensor); - - // Copy the data from raw memory of the given 'tensor' to delegate buffer - // handle. This can be null if the delegate doesn't use its own buffer. - TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context, - struct TfLiteDelegate* delegate, - TfLiteBufferHandle buffer_handle, - TfLiteTensor* tensor); - - // Free the Delegate Buffer Handle. Note: This only frees the handle, but - // this doesn't release the underlying resource (e.g. textures). The - // resources are either owned by application layer or the delegate. - // This can be null if the delegate doesn't use its own buffer. - void (*FreeBufferHandle)(TfLiteContext* context, - struct TfLiteDelegate* delegate, - TfLiteBufferHandle* handle); - - // Bitmask flags. See the comments in `TfLiteDelegateFlags`. - int64_t flags; -} TfLiteDelegate; - -// Build a 'null' delegate, with all the fields properly set to their default -// values. -TfLiteDelegate TfLiteDelegateCreate(); - -#ifdef __cplusplus -} // extern "C" -#endif // __cplusplus -#endif // TENSORFLOW_LITE_C_COMMON_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/error_reporter.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/error_reporter.cc deleted file mode 100644 index 7070eaa5..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/error_reporter.cc +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/core/api/error_reporter.h" -#include - -namespace tflite { - -int ErrorReporter::Report(const char* format, ...) { - va_list args; - va_start(args, format); - int code = Report(format, args); - va_end(args); - return code; -} - -// TODO(aselle): Make the name of ReportError on context the same, so -// we can use the ensure functions w/o a context and w/ a reporter. -int ErrorReporter::ReportError(void*, const char* format, ...) { - va_list args; - va_start(args, format); - int code = Report(format, args); - va_end(args); - return code; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/error_reporter.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/error_reporter.h deleted file mode 100644 index 05839a61..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/error_reporter.h +++ /dev/null @@ -1,59 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ -#define TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ - -#include - -namespace tflite { - -/// A functor that reports error to supporting system. Invoked similar to -/// printf. -/// -/// Usage: -/// ErrorReporter foo; -/// foo.Report("test %d", 5); -/// or -/// va_list args; -/// foo.Report("test %d", args); // where args is va_list -/// -/// Subclass ErrorReporter to provide another reporting destination. -/// For example, if you have a GUI program, you might redirect to a buffer -/// that drives a GUI error log box. -class ErrorReporter { - public: - virtual ~ErrorReporter() {} - virtual int Report(const char* format, va_list args) = 0; - int Report(const char* format, ...); - int ReportError(void*, const char* format, ...); -}; - -} // namespace tflite - -// You should not make bare calls to the error reporter, instead use the -// TF_LITE_REPORT_ERROR macro, since this allows message strings to be -// stripped when the binary size has to be optimized. If you are looking to -// reduce binary size, define TF_LITE_STRIP_ERROR_STRINGS when compiling and -// every call will be stubbed out, taking no memory. -#ifndef TF_LITE_STRIP_ERROR_STRINGS -#define TF_LITE_REPORT_ERROR(reporter, ...) 
\ - do { \ - static_cast(reporter)->Report(__VA_ARGS__); \ - } while (false) -#else // TF_LITE_STRIP_ERROR_STRINGS -#define TF_LITE_REPORT_ERROR(reporter, ...) -#endif // TF_LITE_STRIP_ERROR_STRINGS - -#endif // TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/flatbuffer_conversions.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/flatbuffer_conversions.cc deleted file mode 100644 index fcd04373..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/flatbuffer_conversions.cc +++ /dev/null @@ -1,1945 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/core/api/flatbuffer_conversions.h" - -#include -#include -#include - -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { - -namespace { - -// Utility class for safely allocating POD data. This is useful for avoiding -// leaks in cases where op params are allocated but fail to propagate to the -// parsed op data (e.g., when model parameters are invalid). -class SafeBuiltinDataAllocator { - public: - class BuiltinDataDeleter { - public: - explicit BuiltinDataDeleter(BuiltinDataAllocator* allocator) - : allocator_(allocator) {} - - void operator()(void* data) { allocator_->Deallocate(data); } - - private: - BuiltinDataAllocator* allocator_; - }; - - template - using BuiltinDataPtr = std::unique_ptr; - - explicit SafeBuiltinDataAllocator(BuiltinDataAllocator* allocator) - : allocator_(allocator) {} - - template - BuiltinDataPtr Allocate() { - return BuiltinDataPtr(allocator_->AllocatePOD(), - BuiltinDataDeleter(allocator_)); - } - - private: - BuiltinDataAllocator* allocator_; -}; - -// All the Parse functions take some pointers as params and this function has -// the common DCHECKs to catch if any of those are nullptr. -void CheckParsePointerParams(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - TFLITE_DCHECK(op != nullptr); - TFLITE_DCHECK(error_reporter != nullptr); - TFLITE_DCHECK(allocator != nullptr); - TFLITE_DCHECK(builtin_data != nullptr); -} - -// Copies the contents from the flatbuffer int vector `flatbuffer` into the -// int array `buffer`. `flat_vector` and `buffer` represent the same -// configuration operation for a given operation. 
-TfLiteStatus FlatBufferIntVectorToArray( - int max_size_of_buffer, const flatbuffers::Vector* flat_vector, - int* buffer, ErrorReporter* error_reporter, const char* op_name) { - if (!flat_vector) { - TF_LITE_REPORT_ERROR(error_reporter, - "Input array not provided for operation '%s'.\n", - op_name); - return kTfLiteError; - } else { - size_t num_dimensions = flat_vector->size(); - if (num_dimensions > max_size_of_buffer / sizeof(int)) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Found too many dimensions in the input array of operation '%s'.\n", - op_name); - return kTfLiteError; - } else { - for (size_t i = 0; i < num_dimensions; ++i) { - buffer[i] = flat_vector->Get(i); - } - } - } - return kTfLiteOk; -} - -// Converts the flatbuffer activation to what is used at runtime. -TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) { - switch (activation) { - case ActivationFunctionType_NONE: - return kTfLiteActNone; - case ActivationFunctionType_RELU: - return kTfLiteActRelu; - case ActivationFunctionType_RELU_N1_TO_1: - return kTfLiteActReluN1To1; - case ActivationFunctionType_RELU6: - return kTfLiteActRelu6; - case ActivationFunctionType_TANH: - return kTfLiteActTanh; - case ActivationFunctionType_SIGN_BIT: - return kTfLiteActSignBit; - } - return kTfLiteActNone; -} - -// Converts the flatbuffer padding enum to what is used at runtime. -TfLitePadding ConvertPadding(Padding padding) { - switch (padding) { - case Padding_SAME: - return kTfLitePaddingSame; - case Padding_VALID: - return kTfLitePaddingValid; - } - return kTfLitePaddingUnknown; -} - -#ifndef TF_LITE_STATIC_MEMORY -TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - auto parseLSHProjectionType = [](LSHProjectionType type) { - switch (type) { - case LSHProjectionType_SPARSE: - return kTfLiteLshProjectionSparse; - case LSHProjectionType_DENSE: - return kTfLiteLshProjectionDense; - default: - return kTfLiteLshProjectionUnknown; - } - }; - auto parseCombinerType = [](CombinerType type) { - switch (type) { - case CombinerType_MEAN: - return kTfLiteCombinerTypeMean; - case CombinerType_SQRTN: - return kTfLiteCombinerTypeSqrtn; - case CombinerType_SUM: - default: - return kTfLiteCombinerTypeSum; - } - }; - - SafeBuiltinDataAllocator safe_allocator(allocator); - *builtin_data = nullptr; - switch (op_type) { - case BuiltinOperator_ABS: { - return ParseAbs(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_ADD: { - return ParseAdd(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_ARG_MAX: { - return ParseArgMax(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_ARG_MIN: { - return ParseArgMin(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_AVERAGE_POOL_2D: { - return ParsePool(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_CEIL: { - return ParseCeil(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_CONCATENATION: { - return ParseConcatenation(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_CONV_2D: { - return ParseConv2D(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_DEPTHWISE_CONV_2D: { - return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_DEQUANTIZE: { - return ParseDequantize(op, error_reporter, allocator, builtin_data); - } - - case 
BuiltinOperator_DIV: { - return ParseDiv(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_EXP: { - return ParseExp(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_FILL: { - return ParseFill(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_FLOOR: { - return ParseFloor(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_FLOOR_DIV: { - return ParseFloorDiv(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_FLOOR_MOD: { - return ParseFloorMod(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_FULLY_CONNECTED: { - return ParseFullyConnected(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_GREATER: { - return ParseGreater(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_GREATER_EQUAL: { - return ParseGreaterEqual(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_HARD_SWISH: { - return ParseHardSwish(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_L2_NORMALIZATION: { - return ParseL2Normalization(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_L2_POOL_2D: { - return ParsePool(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LESS: { - return ParseLess(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LESS_EQUAL: { - return ParseLessEqual(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LOG: { - return ParseLog(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LOGICAL_AND: { - return ParseLogicalAnd(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LOGICAL_NOT: { - return ParseLogicalNot(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LOGICAL_OR: { - return ParseLogicalOr(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_LOGISTIC: { - return ParseLogistic(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_MAXIMUM: { - return ParseMaximum(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_MAX_POOL_2D: { - return ParsePool(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_MEAN: { - return ParseReducer(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_MINIMUM: { - return ParseMinimum(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_MUL: { - return ParseMul(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_NEG: { - return ParseNeg(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_NOT_EQUAL: { - return ParseNotEqual(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_PACK: { - return ParsePack(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_PAD: { - return ParsePad(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_PADV2: { - return ParsePadV2(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_POW: { - return ParsePow(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_PRELU: { - return ParsePrelu(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_QUANTIZE: { - return ParseQuantize(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_REDUCE_ANY: { - return ParseReducer(op, error_reporter, allocator, builtin_data); - } 
- - case BuiltinOperator_REDUCE_MAX: { - return ParseReducer(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_REDUCE_MIN: { - return ParseReducer(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_REDUCE_PROD: { - return ParseReducer(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_RELU: { - return ParseRelu(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_RELU6: { - return ParseRelu6(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_RESHAPE: { - return ParseReshape(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_RESIZE_BILINEAR: { - return ParseResizeBilinear(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: { - return ParseResizeNearestNeighbor(op, error_reporter, allocator, - builtin_data); - } - - case BuiltinOperator_ROUND: { - return ParseRound(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_RSQRT: { - return ParseRsqrt(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SHAPE: { - return ParseShape(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SIN: { - return ParseSin(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SOFTMAX: { - return ParseSoftmax(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SPACE_TO_DEPTH: { - return ParseSpaceToDepth(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SPLIT: { - return ParseSplit(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SPLIT_V: { - return ParseSplitV(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SQRT: { - return ParseSqrt(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SQUARE: { - return ParseSquare(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_STRIDED_SLICE: { - return ParseStridedSlice(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SUB: { - return ParseSub(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SUM: { - return ParseReducer(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_SVDF: { - return ParseSvdf(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_TANH: { - return ParseTanh(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_TRANSPOSE_CONV: { - return ParseTransposeConv(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_UNPACK: { - return ParseUnpack(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_ZEROS_LIKE: { - return ParseZerosLike(op, error_reporter, allocator, builtin_data); - } - - case BuiltinOperator_CAST: { - return ParseCast(op, error_reporter, allocator, builtin_data); - } - case BuiltinOperator_LSH_PROJECTION: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* lshParams = - op->builtin_options_as_LSHProjectionOptions()) { - params->type = parseLSHProjectionType(lshParams->type()); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* sequence_rnn_params = - op->builtin_options_as_SequenceRNNOptions()) { - params->activation = - 
ConvertActivation(sequence_rnn_params->fused_activation_function()); - params->time_major = sequence_rnn_params->time_major(); - params->asymmetric_quantize_inputs = - sequence_rnn_params->asymmetric_quantize_inputs(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: { - auto params = - safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* bidi_sequence_rnn_params = - op->builtin_options_as_BidirectionalSequenceRNNOptions()) { - params->activation = ConvertActivation( - bidi_sequence_rnn_params->fused_activation_function()); - params->time_major = bidi_sequence_rnn_params->time_major(); - params->merge_outputs = bidi_sequence_rnn_params->merge_outputs(); - params->asymmetric_quantize_inputs = - bidi_sequence_rnn_params->asymmetric_quantize_inputs(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_RNN: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* rnn_params = op->builtin_options_as_RNNOptions()) { - params->activation = - ConvertActivation(rnn_params->fused_activation_function()); - params->asymmetric_quantize_inputs = - rnn_params->asymmetric_quantize_inputs(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_EMBEDDING_LOOKUP_SPARSE: { - auto params = - safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* embedding_params = - op->builtin_options_as_EmbeddingLookupSparseOptions()) { - params->combiner = parseCombinerType(embedding_params->combiner()); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - - case BuiltinOperator_HASHTABLE_LOOKUP: - // no-op. 
- return kTfLiteOk; - - case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = - op->builtin_options_as_LocalResponseNormalizationOptions()) { - params->radius = schema_params->radius(); - params->bias = schema_params->bias(); - params->alpha = schema_params->alpha(); - params->beta = schema_params->beta(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_LSTM: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) { - params->activation = - ConvertActivation(lstm_params->fused_activation_function()); - params->cell_clip = lstm_params->cell_clip(); - params->proj_clip = lstm_params->proj_clip(); - switch (lstm_params->kernel_type()) { - case LSTMKernelType_FULL: - params->kernel_type = kTfLiteLSTMFullKernel; - break; - case LSTMKernelType_BASIC: - params->kernel_type = kTfLiteLSTMBasicKernel; - break; - default: - TF_LITE_REPORT_ERROR(error_reporter, - "Unhandled LSTM kernel type: %d", - lstm_params->kernel_type()); - return kTfLiteError; - } - params->asymmetric_quantize_inputs = - lstm_params->asymmetric_quantize_inputs(); - } else { - TF_LITE_REPORT_ERROR(error_reporter, - "No valid LSTM builtin options exist"); - return kTfLiteError; - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: { - auto params = - safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* seq_lstm_params = - op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) { - params->activation = - ConvertActivation(seq_lstm_params->fused_activation_function()); - params->cell_clip = seq_lstm_params->cell_clip(); - params->proj_clip = seq_lstm_params->proj_clip(); - params->time_major = seq_lstm_params->time_major(); - params->asymmetric_quantize_inputs = - seq_lstm_params->asymmetric_quantize_inputs(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: { - auto params = - safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* bidi_lstm_params = - op->builtin_options_as_BidirectionalSequenceLSTMOptions()) { - params->activation = - ConvertActivation(bidi_lstm_params->fused_activation_function()); - params->cell_clip = bidi_lstm_params->cell_clip(); - params->proj_clip = bidi_lstm_params->proj_clip(); - params->merge_outputs = bidi_lstm_params->merge_outputs(); - params->time_major = bidi_lstm_params->time_major(); - params->asymmetric_quantize_inputs = - bidi_lstm_params->asymmetric_quantize_inputs(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_SKIP_GRAM: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* skip_gram_params = - op->builtin_options_as_SkipGramOptions()) { - params->ngram_size = skip_gram_params->ngram_size(); - params->max_skip_size = skip_gram_params->max_skip_size(); - params->include_all_ngrams = skip_gram_params->include_all_ngrams(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - - case BuiltinOperator_DEPTH_TO_SPACE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = - 
op->builtin_options_as_DepthToSpaceOptions()) { - params->block_size = schema_params->block_size(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_GATHER: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - params->axis = 0; - if (const auto* gather_params = op->builtin_options_as_GatherOptions()) { - params->axis = gather_params->axis(); - } - - *builtin_data = params.release(); - return kTfLiteOk; - } - - case BuiltinOperator_SQUEEZE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = op->builtin_options_as_SqueezeOptions()) { - const auto* squeeze_dims = schema_params->squeeze_dims(); - if (squeeze_dims != nullptr) { - TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray( - sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims, - error_reporter, "squeeze")); - params->num_squeeze_dims = squeeze_dims->size(); - } else { - params->num_squeeze_dims = 0; - } - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_SPARSE_TO_DENSE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* sparse_to_dense_params = - op->builtin_options_as_SparseToDenseOptions()) { - params->validate_indices = sparse_to_dense_params->validate_indices(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_DELEGATE: { - // TODO(ycling): Revisit when supporting saving delegated models. - TF_LITE_REPORT_ERROR(error_reporter, - "DELEGATE op shouldn't exist in model."); - return kTfLiteError; - } - case BuiltinOperator_FAKE_QUANT: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = - op->builtin_options_as_FakeQuantOptions()) { - params->min = schema_params->min(); - params->max = schema_params->max(); - params->num_bits = schema_params->num_bits(); - params->narrow_range = schema_params->narrow_range(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_ONE_HOT: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = op->builtin_options_as_OneHotOptions()) { - params->axis = schema_params->axis(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_LEAKY_RELU: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* leaky_relu_params = - op->builtin_options_as_LeakyReluOptions()) { - params->alpha = leaky_relu_params->alpha(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_MIRROR_PAD: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - const auto* mirror_pad_params = op->builtin_options_as_MirrorPadOptions(); - if (mirror_pad_params != nullptr) { - params->mode = - mirror_pad_params->mode() == tflite::MirrorPadMode_REFLECT - ? 
TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect - : TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric; - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_UNIQUE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - const auto* unique_params = op->builtin_options_as_UniqueOptions(); - if (unique_params != nullptr) { - params->index_out_type = - unique_params->idx_out_type() == tflite::TensorType_INT64 - ? TfLiteType::kTfLiteInt64 - : TfLiteType::kTfLiteInt32; - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_REVERSE_SEQUENCE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* reverse_seq_params = - op->builtin_options_as_ReverseSequenceOptions()) { - params->seq_dim = reverse_seq_params->seq_dim(); - params->batch_dim = reverse_seq_params->batch_dim(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_IF: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* if_params = op->builtin_options_as_IfOptions()) { - params->then_subgraph_index = if_params->then_subgraph_index(); - params->else_subgraph_index = if_params->else_subgraph_index(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_WHILE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* while_params = op->builtin_options_as_WhileOptions()) { - params->cond_subgraph_index = while_params->cond_subgraph_index(); - params->body_subgraph_index = while_params->body_subgraph_index(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_BATCH_MATMUL: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* bmm_params = - op->builtin_options_as_BatchMatMulOptions()) { - params->adj_x = bmm_params->adj_x(); - params->adj_y = bmm_params->adj_y(); - params->asymmetric_quantize_inputs = - bmm_params->asymmetric_quantize_inputs(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_CALL_ONCE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* call_once_params = - op->builtin_options_as_CallOnceOptions()) { - params->init_subgraph_index = call_once_params->init_subgraph_index(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_CUMSUM: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) { - params->exclusive = cumsum_params->exclusive(); - params->reverse = cumsum_params->reverse(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - // Below are the ops with no builtin_data structure. - case BuiltinOperator_BATCH_TO_SPACE_ND: - // TODO(aselle): Implement call in BuiltinOptions, but nullptrs are - // ok for now, since there is no call implementation either. 
- case BuiltinOperator_CALL: - case BuiltinOperator_CONCAT_EMBEDDINGS: - case BuiltinOperator_COS: - case BuiltinOperator_CUSTOM: - case BuiltinOperator_ELU: - case BuiltinOperator_EMBEDDING_LOOKUP: - case BuiltinOperator_EQUAL: - case BuiltinOperator_EXPAND_DIMS: - case BuiltinOperator_LOG_SOFTMAX: - case BuiltinOperator_MATRIX_DIAG: - case BuiltinOperator_MATRIX_SET_DIAG: - case BuiltinOperator_RELU_N1_TO_1: - case BuiltinOperator_SELECT: - case BuiltinOperator_SELECT_V2: - case BuiltinOperator_SLICE: - case BuiltinOperator_SPACE_TO_BATCH_ND: - case BuiltinOperator_TILE: - case BuiltinOperator_TOPK_V2: - case BuiltinOperator_TRANSPOSE: - case BuiltinOperator_RANGE: - case BuiltinOperator_SQUARED_DIFFERENCE: - case BuiltinOperator_REVERSE_V2: - case BuiltinOperator_ADD_N: - case BuiltinOperator_GATHER_ND: - case BuiltinOperator_WHERE: - case BuiltinOperator_RANK: - case BuiltinOperator_NON_MAX_SUPPRESSION_V4: - case BuiltinOperator_NON_MAX_SUPPRESSION_V5: - case BuiltinOperator_SCATTER_ND: - case BuiltinOperator_DENSIFY: - case BuiltinOperator_SEGMENT_SUM: - case BuiltinOperator_BROADCAST_TO: - case BuiltinOperator_RFFT2D: - return kTfLiteOk; - case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES: - return kTfLiteError; - } - return kTfLiteError; -} // NOLINT[readability/fn_size] -#endif // !defined(TF_LITE_STATIC_MEMORY) -} // namespace - -TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, - ErrorReporter* error_reporter) { - switch (tensor_type) { - case TensorType_FLOAT16: - *type = kTfLiteFloat16; - return kTfLiteOk; - case TensorType_FLOAT32: - *type = kTfLiteFloat32; - return kTfLiteOk; - case TensorType_FLOAT64: - *type = kTfLiteFloat64; - return kTfLiteOk; - case TensorType_INT16: - *type = kTfLiteInt16; - return kTfLiteOk; - case TensorType_INT32: - *type = kTfLiteInt32; - return kTfLiteOk; - case TensorType_UINT8: - *type = kTfLiteUInt8; - return kTfLiteOk; - case TensorType_INT8: - *type = kTfLiteInt8; - return kTfLiteOk; - case TensorType_INT64: - *type = kTfLiteInt64; - return kTfLiteOk; - case TensorType_UINT64: - *type = kTfLiteUInt64; - return kTfLiteOk; - case TensorType_STRING: - *type = kTfLiteString; - return kTfLiteOk; - case TensorType_BOOL: - *type = kTfLiteBool; - return kTfLiteOk; - case TensorType_COMPLEX64: - *type = kTfLiteComplex64; - return kTfLiteOk; - case TensorType_COMPLEX128: - *type = kTfLiteComplex128; - return kTfLiteOk; - default: - *type = kTfLiteNoType; - TF_LITE_REPORT_ERROR(error_reporter, - "Unsupported data type %d in tensor\n", tensor_type); - return kTfLiteError; - } -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. 
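[Editor's note] The ConvertTensorType routine deleted just above is the single point where flatbuffer TensorType codes become runtime TfLiteType values; every parser that carries an output or cast type funnels through it. Below is a minimal, hedged usage sketch, not code from this tree: GetRuntimeType is a hypothetical helper name, and the TFLite headers are assumed to be included.

    // Assumes: #include "tensorflow/lite/core/api/flatbuffer_conversions.h"
    //          #include "tensorflow/lite/schema/schema_generated.h"
    // GetRuntimeType is an illustrative name, not part of this tree.
    TfLiteStatus GetRuntimeType(const tflite::Tensor* flatbuffer_tensor,
                                tflite::ErrorReporter* error_reporter,
                                TfLiteType* runtime_type) {
      // ConvertTensorType reports unsupported dtypes through error_reporter and
      // returns kTfLiteError, so the status can simply be forwarded.
      return tflite::ConvertTensorType(flatbuffer_tensor->type(), runtime_type,
                                       error_reporter);
    }

The trivial per-op helpers that the comment above refers to (ParseAbs and the rest) follow.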
-TfLiteStatus ParseAbs(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const AddOptions* schema_params = op->builtin_options_as_AddOptions(); - - if (schema_params != nullptr) { - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - params->pot_scale_int16 = schema_params->pot_scale_int16(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const ArgMaxOptions* schema_params = op->builtin_options_as_ArgMaxOptions(); - - if (schema_params != nullptr) { - TF_LITE_ENSURE_STATUS(ConvertTensorType( - schema_params->output_type(), ¶ms->output_type, error_reporter)); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const ArgMinOptions* schema_params = op->builtin_options_as_ArgMinOptions(); - - if (schema_params != nullptr) { - TF_LITE_ENSURE_STATUS(ConvertTensorType( - schema_params->output_type(), ¶ms->output_type, error_reporter)); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. 
-TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = op->builtin_options_as_CastOptions()) { - TF_LITE_ENSURE_STATUS(ConvertTensorType( - schema_params->in_data_type(), ¶ms->in_data_type, error_reporter)); - TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_data_type(), - ¶ms->out_data_type, - error_reporter)); - } - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseConcatenation(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const ConcatenationOptions* schema_params = - op->builtin_options_as_ConcatenationOptions(); - - if (schema_params != nullptr) { - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - params->axis = schema_params->axis(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const Conv2DOptions* schema_params = op->builtin_options_as_Conv2DOptions(); - - if (schema_params != nullptr) { - params->padding = ConvertPadding(schema_params->padding()); - params->stride_width = schema_params->stride_w(); - params->stride_height = schema_params->stride_h(); - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - - params->dilation_width_factor = schema_params->dilation_w_factor(); - params->dilation_height_factor = schema_params->dilation_h_factor(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. 
-TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseDepthwiseConv2D(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const DepthwiseConv2DOptions* schema_params = - op->builtin_options_as_DepthwiseConv2DOptions(); - - if (schema_params != nullptr) { - params->padding = ConvertPadding(schema_params->padding()); - params->stride_width = schema_params->stride_w(); - params->stride_height = schema_params->stride_h(); - params->depth_multiplier = schema_params->depth_multiplier(); - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - - params->dilation_width_factor = schema_params->dilation_w_factor(); - params->dilation_height_factor = schema_params->dilation_h_factor(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* schema_params = op->builtin_options_as_DivOptions()) { - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - } - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseExp(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. 
-TfLiteStatus ParseFill(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseFloorDiv(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseFloorMod(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseFullyConnected(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const FullyConnectedOptions* schema_params = - op->builtin_options_as_FullyConnectedOptions(); - - if (schema_params != nullptr) { - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - params->keep_num_dims = schema_params->keep_num_dims(); - params->asymmetric_quantize_inputs = - schema_params->asymmetric_quantize_inputs(); - - switch (schema_params->weights_format()) { - case FullyConnectedOptionsWeightsFormat_DEFAULT: - params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault; - break; - case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8: - params->weights_format = - kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8; - break; - default: - TF_LITE_REPORT_ERROR(error_reporter, - "Unhandled fully-connected weights format."); - return kTfLiteError; - } - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseGreater(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. 
-TfLiteStatus ParseGreaterEqual(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseL2Normalization(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const L2NormOptions* schema_params = op->builtin_options_as_L2NormOptions(); - - if (schema_params != nullptr) { - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseLess(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseLessEqual(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseLog(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseLogicalAnd(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseLogicalNot(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. 
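[Editor's note] The comment repeated before each of these no-op helpers states the design point: TFLite Micro references parse functions per operator, so an image that names only the helpers it needs lets the linker discard the rest, including the monolithic ParseOpData switch. A hedged sketch of that idea follows; the table type and entries are hypothetical and are not the micro resolver's actual API.

    // Assumes the deleted flatbuffer_conversions.h is included.
    // BuiltinParser and kParsers are illustrative names only.
    struct BuiltinParser {
      tflite::BuiltinOperator op;
      TfLiteStatus (*parse)(const tflite::Operator*, tflite::ErrorReporter*,
                            tflite::BuiltinDataAllocator*, void**);
    };
    // Only the parsers listed here are referenced, so everything else can be
    // dropped at link time.
    static const BuiltinParser kParsers[] = {
        {tflite::BuiltinOperator_ABS, tflite::ParseAbs},
        {tflite::BuiltinOperator_LOGISTIC, tflite::ParseLogistic},
    };

ParseLogicalOr and the remaining helpers continue below.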
-TfLiteStatus ParseLogicalOr(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseMaximum(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const MulOptions* schema_params = op->builtin_options_as_MulOptions(); - - if (schema_params != nullptr) { - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseNeg(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseNotEqual(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const PackOptions* schema_params = op->builtin_options_as_PackOptions(); - - if (schema_params != nullptr) { - params->values_count = schema_params->values_count(); - params->axis = schema_params->axis(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. 
- } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParsePad(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParsePadV2(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const Pool2DOptions* schema_params = op->builtin_options_as_Pool2DOptions(); - - if (schema_params != nullptr) { - params->padding = ConvertPadding(schema_params->padding()); - params->stride_width = schema_params->stride_w(); - params->stride_height = schema_params->stride_h(); - params->filter_width = schema_params->filter_width(); - params->filter_height = schema_params->filter_height(); - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParsePow(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. 
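[Editor's note] On the consuming side, each parser hands back builtin_data as a void*, and a kernel later casts it to the concrete params struct that the parser allocated. A short sketch, assuming the pointer was produced by ParsePool above; UsePoolParams is a hypothetical name, and in the interpreter the pointer normally arrives through the node's builtin_data field.

    // Assumes: #include "tensorflow/lite/c/builtin_op_data.h"
    // Illustrative consumer of ParsePool's output; the cast target must match
    // the parser that filled the pointer.
    void UsePoolParams(const void* builtin_data) {
      const auto* pool = static_cast<const TfLitePoolParams*>(builtin_data);
      const int stride_w = pool->stride_width;   // set from Pool2DOptions above
      const int stride_h = pool->stride_height;
      (void)stride_w;
      (void)stride_h;
      // ... padding, filter_width/height and the fused activation likewise ...
    }

ParseQuantize and the option-bearing parsers continue below.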
-TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const ReducerOptions* schema_params = op->builtin_options_as_ReducerOptions(); - - if (schema_params != nullptr) { - params->keep_dims = schema_params->keep_dims(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseRelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseRelu6(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const ReshapeOptions* schema_params = op->builtin_options_as_ReshapeOptions(); - - if (schema_params != nullptr) { - const flatbuffers::Vector* new_shape = schema_params->new_shape(); - if (new_shape != nullptr) { - TF_LITE_ENSURE_STATUS( - FlatBufferIntVectorToArray(sizeof(params->shape), new_shape, - params->shape, error_reporter, "reshape")); - params->num_dimensions = new_shape->size(); - } else { - // TODO(b/157480169) TODO(b/147203660): We should either return - // kTfLiteError or fill in some reasonable defaults in the params struct. - // We are not doing so until we better undertand the ramifications of - // changing the legacy behavior. - } - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. 
- } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseResizeBilinear(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const ResizeBilinearOptions* schema_params = - op->builtin_options_as_ResizeBilinearOptions(); - - if (schema_params != nullptr) { - params->align_corners = schema_params->align_corners(); - params->half_pixel_centers = schema_params->half_pixel_centers(); - } else { - params->align_corners = false; - params->half_pixel_centers = false; - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseResizeNearestNeighbor(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const ResizeNearestNeighborOptions* schema_params = - op->builtin_options_as_ResizeNearestNeighborOptions(); - - if (schema_params != nullptr) { - params->align_corners = schema_params->align_corners(); - params->half_pixel_centers = schema_params->half_pixel_centers(); - } else { - params->align_corners = false; - params->half_pixel_centers = false; - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseRound(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const ShapeOptions* schema_params = op->builtin_options_as_ShapeOptions(); - - if (schema_params != nullptr) { - TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_type(), - ¶ms->out_type, error_reporter)); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. 
-TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const SoftmaxOptions* schema_params = op->builtin_options_as_SoftmaxOptions(); - - if (schema_params != nullptr) { - params->beta = schema_params->beta(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseSpaceToDepth(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const auto* schema_params = op->builtin_options_as_SpaceToDepthOptions(); - if (schema_params != nullptr) { - params->block_size = schema_params->block_size(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const SplitOptions* schema_params = op->builtin_options_as_SplitOptions(); - - if (schema_params != nullptr) { - params->num_splits = schema_params->num_splits(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - SafeBuiltinDataAllocator safe_allocator(allocator); - - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const SplitVOptions* schema_params = op->builtin_options_as_SplitVOptions(); - - if (schema_params != nullptr) { - params->num_splits = schema_params->num_splits(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. 
- } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseSqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseSquare(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseStridedSlice(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const StridedSliceOptions* schema_params = - op->builtin_options_as_StridedSliceOptions(); - - if (schema_params != nullptr) { - params->begin_mask = schema_params->begin_mask(); - params->end_mask = schema_params->end_mask(); - params->ellipsis_mask = schema_params->ellipsis_mask(); - params->new_axis_mask = schema_params->new_axis_mask(); - params->shrink_axis_mask = schema_params->shrink_axis_mask(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const SubOptions* schema_params = op->builtin_options_as_SubOptions(); - - if (schema_params != nullptr) { - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - params->pot_scale_int16 = schema_params->pot_scale_int16(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. 
- } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const SVDFOptions* schema_params = op->builtin_options_as_SVDFOptions(); - if (schema_params != nullptr) { - params->rank = schema_params->rank(); - params->activation = - ConvertActivation(schema_params->fused_activation_function()); - params->asymmetric_quantize_inputs = - schema_params->asymmetric_quantize_inputs(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. -TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*, - void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseTransposeConv(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - const TransposeConvOptions* transpose_conv_params = - op->builtin_options_as_TransposeConvOptions(); - if (transpose_conv_params != nullptr) { - params->padding = ConvertPadding(transpose_conv_params->padding()); - params->stride_width = transpose_conv_params->stride_w(); - params->stride_height = transpose_conv_params->stride_h(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - *builtin_data = params.release(); - return kTfLiteOk; -} - -TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { - CheckParsePointerParams(op, error_reporter, allocator, builtin_data); - - SafeBuiltinDataAllocator safe_allocator(allocator); - std::unique_ptr - params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - - const UnpackOptions* schema_params = op->builtin_options_as_UnpackOptions(); - - if (schema_params != nullptr) { - params->num = schema_params->num(); - params->axis = schema_params->axis(); - } else { - // TODO(b/157480169): We should either return kTfLiteError or fill in some - // reasonable defaults in the params struct. We are not doing so until we - // better undertand the ramifications of changing the legacy behavior. - } - - *builtin_data = params.release(); - return kTfLiteOk; -} - -// We have this parse function instead of directly returning kTfLiteOk from the -// switch-case in ParseOpData because this function is used as part of the -// selective registration for the OpResolver implementation in micro. 
-TfLiteStatus ParseZerosLike(const Operator*, ErrorReporter*, - BuiltinDataAllocator*, void**) { - return kTfLiteOk; -} - -TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data) { -// TODO(b/145762662): It would be preferable to have the build graph for TF Lite -// Micro not have the ParseOpData function at all. This would require splitting -// the current file into two separate files, one of which defines the -// ParseOpData function and the other that defines the operator specific parse -// functions (e.g. ParseAdd). -// -// Such a split was attempted but was not worth the effort at the time because -// of the following reasons: -// * We could either duplicate the functions and the SafeBuiltinDataAllocator -// class in the anonymous namespace of this file, or attempt to make a common -// library with these helper functions and class. -// * Making a common library with a separate build target was not feasible as -// it introduced circular dependencies due to the ErrorReporter and a common -// .cc and .h within the same api build target the also cause circular -// dependencies due to the BuiltinDataAllocator class. -// * If all the builtin operators were to have their own parse functions, or we -// were ok with some amount of code duplication, then this split of the .cc -// files would be a lot more feasible. -#ifdef TF_LITE_STATIC_MEMORY - TF_LITE_REPORT_ERROR( - error_reporter, - "ParseOpData is unsupported on TfLiteMicro, please use the operator " - "specific parse functions (e.g. ParseAdd etc.).\n"); - return kTfLiteError; -#else - return ParseOpDataTfLite(op, op_type, error_reporter, allocator, - builtin_data); -#endif -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/flatbuffer_conversions.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/flatbuffer_conversions.h deleted file mode 100644 index 9f6131fa..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/flatbuffer_conversions.h +++ /dev/null @@ -1,301 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_ -#define TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_ - -// These functions transform codes and data structures that are defined in the -// flatbuffer serialization format into in-memory values that are used by the -// runtime API and interpreter. - -#include -#include -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { - -// Interface class for builtin data allocations. 
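[Editor's note] The deleted header continues below with the BuiltinDataAllocator interface that every parser above receives. Before it, a minimal sketch of a concrete allocator and of driving the ParseOpData entry point on a non-TF_LITE_STATIC_MEMORY build; HeapBuiltinDataAllocator and ParseAndUse are hypothetical names, not classes or functions from this tree.

    #include <cstddef>
    #include <cstdlib>
    // Assumes: #include "tensorflow/lite/c/common.h"
    //          #include "tensorflow/lite/core/api/flatbuffer_conversions.h"

    // Hypothetical heap-backed implementation of the interface declared below.
    class HeapBuiltinDataAllocator : public tflite::BuiltinDataAllocator {
     public:
      void* Allocate(size_t size, size_t alignment_hint) override {
        (void)alignment_hint;  // assume malloc alignment is sufficient here
        return std::malloc(size);
      }
      void Deallocate(void* data) override { std::free(data); }
    };

    // Illustrative caller: parse the builtin options of an ADD op and read back
    // the typed params. Per the contract above, builtin_data stays nullptr on
    // failure, so the early return does not leak.
    TfLiteStatus ParseAndUse(const tflite::Operator* op,
                             tflite::ErrorReporter* error_reporter) {
      void* builtin_data = nullptr;
      HeapBuiltinDataAllocator allocator;
      TF_LITE_ENSURE_STATUS(tflite::ParseOpData(
          op, tflite::BuiltinOperator_ADD, error_reporter, &allocator,
          &builtin_data));
      const auto* add = static_cast<const TfLiteAddParams*>(builtin_data);
      // ... use add->activation and add->pot_scale_int16 as needed ...
      allocator.Deallocate(builtin_data);
      return kTfLiteOk;
    }

The interface itself, as declared in the deleted header, follows.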
-class BuiltinDataAllocator { - public: - virtual void* Allocate(size_t size, size_t alignment_hint) = 0; - virtual void Deallocate(void* data) = 0; - - // Allocate a structure, but make sure it is a POD structure that doesn't - // require constructors to run. The reason we do this, is that Interpreter's C - // extension part will take ownership so destructors will not be run during - // deallocation. - template - T* AllocatePOD() { - // TODO(b/154346074): Change this to is_trivially_destructible when all - // platform targets support that properly. - static_assert(std::is_pod::value, "Builtin data structure must be POD."); - void* allocated_memory = this->Allocate(sizeof(T), alignof(T)); - return new (allocated_memory) T(); - } - - virtual ~BuiltinDataAllocator() {} -}; - -// Parse the appropriate data out of the op. -// -// This handles builtin data explicitly as there are flatbuffer schemas. -// If it returns kTfLiteOk, it passes the data out with `builtin_data`. The -// calling function has to pass in an allocator object, and this allocator -// will be called to reserve space for the output data. If the calling -// function's allocator reserves memory on the heap, then it's the calling -// function's responsibility to free it. -// If it returns kTfLiteError, `builtin_data` will be `nullptr`. -TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -// Converts the tensor data type used in the flat buffer to the representation -// used by the runtime. -TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, - ErrorReporter* error_reporter); - -TfLiteStatus ParseAbs(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseCeil(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseConcatenation(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseCos(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseDepthwiseConv2D(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseDequantize(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseEqual(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseExp(const Operator* op, ErrorReporter* error_reporter, - 
BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseFill(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseFloor(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseFloorDiv(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseFloorMod(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseFullyConnected(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseGreater(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseGreaterEqual(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseHardSwish(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseL2Normalization(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseLess(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseLessEqual(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseLog(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseLogicalAnd(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseLogicalNot(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseLogicalOr(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseLogistic(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseMaximum(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseMinimum(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseNeg(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseNotEqual(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParsePad(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParsePadV2(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParsePow(const Operator* 
op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParsePrelu(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseQuantize(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseRelu(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseRelu6(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseResizeBilinear(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseResizeNearestNeighbor(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseRound(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseRsqrt(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseSin(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseSpaceToDepth(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseSqrt(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseSquare(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseStridedSlice(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseTanh(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseTransposeConv(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, void** builtin_data); - -TfLiteStatus ParseZerosLike(const Operator* op, ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - -} // 
namespace tflite - -#endif // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/op_resolver.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/op_resolver.cc deleted file mode 100644 index c5dffb63..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/op_resolver.cc +++ /dev/null @@ -1,67 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/core/api/op_resolver.h" - -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/schema/schema_utils.h" - -namespace tflite { - -TfLiteStatus GetRegistrationFromOpCode( - const OperatorCode* opcode, const OpResolver& op_resolver, - ErrorReporter* error_reporter, const TfLiteRegistration** registration) { - TfLiteStatus status = kTfLiteOk; - *registration = nullptr; - auto builtin_code = GetBuiltinCode(opcode); - int version = opcode->version(); - - if (builtin_code > BuiltinOperator_MAX || - builtin_code < BuiltinOperator_MIN) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Op builtin_code out of range: %d. Are you using old TFLite binary " - "with newer model?", - builtin_code); - status = kTfLiteError; - } else if (builtin_code != BuiltinOperator_CUSTOM) { - *registration = op_resolver.FindOp(builtin_code, version); - if (*registration == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Didn't find op for builtin opcode '%s' version '%d'\n", - EnumNameBuiltinOperator(builtin_code), version); - status = kTfLiteError; - } - } else if (!opcode->custom_code()) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Operator with CUSTOM builtin_code has no custom_code.\n"); - status = kTfLiteError; - } else { - const char* name = opcode->custom_code()->c_str(); - *registration = op_resolver.FindOp(name, version); - if (*registration == nullptr) { - // Do not report error for unresolved custom op, we do the final check - // while preparing ops. - status = kTfLiteError; - } - } - return status; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/op_resolver.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/op_resolver.h deleted file mode 100644 index b6a8171d..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/op_resolver.h +++ /dev/null @@ -1,60 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ -#define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ - -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { - -/// Abstract interface that returns TfLiteRegistrations given op codes or custom -/// op names. This is the mechanism that ops being referenced in the flatbuffer -/// model are mapped to executable function pointers (TfLiteRegistrations). -class OpResolver { - public: - /// Finds the op registration for a builtin operator by enum code. - virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op, - int version) const = 0; - /// Finds the op registration of a custom operator by op name. - virtual const TfLiteRegistration* FindOp(const char* op, - int version) const = 0; - - // Returns optional delegates for resolving and handling ops in the flatbuffer - // model. This may be used in addition to the standard TfLiteRegistration - // lookup for graph resolution. - using TfLiteDelegatePtrVector = - std::vector>; - virtual TfLiteDelegatePtrVector GetDelegates(int num_threads) const { - return TfLiteDelegatePtrVector(); - } - - virtual ~OpResolver() {} -}; - -// Handles the logic for converting between an OperatorCode structure extracted -// from a flatbuffer and information about a registered operator -// implementation. -TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode, - const OpResolver& op_resolver, - ErrorReporter* error_reporter, - const TfLiteRegistration** registration); - -} // namespace tflite - -#endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/profiler.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/profiler.h deleted file mode 100644 index f2dd12c2..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/profiler.h +++ /dev/null @@ -1,194 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_CORE_API_PROFILER_H_ -#define TENSORFLOW_LITE_CORE_API_PROFILER_H_ - -#include - -namespace tflite { - -// A simple utility for enabling profiled event tracing in TensorFlow Lite. 
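[Note] As a reading aid for the op-resolver interface deleted just above, the sketch below shows a deliberately tiny OpResolver and how GetRegistrationFromOpCode() would consult it. The class, the empty registration, and the lookup wrapper are illustrative assumptions only.

    // Sketch only: resolves a single builtin operator and no custom ops.
    class TrivialOpResolver : public tflite::OpResolver {
     public:
      const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
                                       int /*version*/) const override {
        return op == tflite::BuiltinOperator_ADD ? &add_registration_ : nullptr;
      }
      const TfLiteRegistration* FindOp(const char* /*custom_name*/,
                                       int /*version*/) const override {
        return nullptr;
      }

     private:
      // A real resolver would fill in the init/prepare/invoke function pointers.
      TfLiteRegistration add_registration_ = {};
    };

    // `opcode` and `reporter` are assumed to come from the loaded flatbuffer model.
    TfLiteStatus LookUpRegistration(const tflite::OperatorCode* opcode,
                                    tflite::ErrorReporter* reporter,
                                    const TfLiteRegistration** registration) {
      TrivialOpResolver resolver;
      return tflite::GetRegistrationFromOpCode(opcode, resolver, reporter,
                                               registration);
    }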
-class Profiler { - public: - // As certain Profiler instance might be only interested in certain event - // types, we define each event type value to allow a Profiler to use - // bitmasking bitwise operations to determine whether an event should be - // recorded or not. - enum class EventType { - // Default event type, the metadata field has no special significance. - DEFAULT = 1, - - // The event is an operator invocation and the event_metadata field is the - // index of operator node. - OPERATOR_INVOKE_EVENT = 2, - - // The event is an invocation for an internal operator of a TFLite delegate. - // The event_metadata field is the index of operator node that's specific to - // the delegate. - DELEGATE_OPERATOR_INVOKE_EVENT = 4, - - // The event is a recording of runtime instrumentation such as the overall - // TFLite runtime status, the TFLite delegate status (if a delegate - // is applied), and the overall model inference latency etc. - // Note, the delegate status and overall status are stored as separate - // event_metadata fields. In particular, the delegate status is encoded - // as DelegateStatus::full_status(). - GENERAL_RUNTIME_INSTRUMENTATION_EVENT = 8, - }; - - virtual ~Profiler() {} - - // Signals the beginning of an event and returns a handle to the profile - // event. The `event_metadata1` and `event_metadata2` have different - // interpretations based on the actual Profiler instance and the `event_type`. - // For example, as for the 'SubgraphAwareProfiler' defined in - // lite/core/subgraph.h, when the event_type is OPERATOR_INVOKE_EVENT, - // `event_metadata1` represents the index of a TFLite node, and - // `event_metadata2` represents the index of the subgraph that this event - // comes from. - virtual uint32_t BeginEvent(const char* tag, EventType event_type, - int64_t event_metadata1, - int64_t event_metadata2) = 0; - // Similar w/ the above, but `event_metadata2` defaults to 0. - uint32_t BeginEvent(const char* tag, EventType event_type, - int64_t event_metadata) { - return BeginEvent(tag, event_type, event_metadata, /*event_metadata2*/ 0); - } - - // Signals an end to the specified profile event with 'event_metadata's, This - // is useful when 'event_metadata's are not available when the event begins - // or when one wants to overwrite the 'event_metadata's set at the beginning. - virtual void EndEvent(uint32_t event_handle, int64_t event_metadata1, - int64_t event_metadata2) {} - // Signals an end to the specified profile event. - virtual void EndEvent(uint32_t event_handle) = 0; - - // Appends an event of type 'event_type' with 'tag' and 'event_metadata' - // which started at 'start' and ended at 'end' - // Note: - // In cases were ProfileSimmarizer and tensorflow::StatsCalculator are used - // they assume the value is in "usec", if in any case subclasses - // didn't put usec, then the values are not meaningful. - // TODO karimnosseir: Revisit and make the function more clear. - void AddEvent(const char* tag, EventType event_type, uint64_t start, - uint64_t end, int64_t event_metadata) { - AddEvent(tag, event_type, start, end, event_metadata, - /*event_metadata2*/ 0); - } - - virtual void AddEvent(const char* tag, EventType event_type, uint64_t start, - uint64_t end, int64_t event_metadata1, - int64_t event_metadata2) {} - - protected: - friend class ScopedProfile; -}; - -// Adds a profile event to `profiler` that begins with the construction -// of the object and ends when the object goes out of scope. 
-// The lifetime of tag should be at least the lifetime of `profiler`. -// `profiler` may be null, in which case nothing is profiled. -class ScopedProfile { - public: - ScopedProfile(Profiler* profiler, const char* tag, - Profiler::EventType event_type = Profiler::EventType::DEFAULT, - int64_t event_metadata = 0) - : profiler_(profiler), event_handle_(0) { - if (profiler) { - event_handle_ = profiler_->BeginEvent(tag, event_type, event_metadata); - } - } - - ~ScopedProfile() { - if (profiler_) { - profiler_->EndEvent(event_handle_); - } - } - - protected: - Profiler* profiler_; - uint32_t event_handle_; -}; - -class ScopedOperatorProfile : public ScopedProfile { - public: - ScopedOperatorProfile(Profiler* profiler, const char* tag, int node_index) - : ScopedProfile(profiler, tag, Profiler::EventType::OPERATOR_INVOKE_EVENT, - static_cast(node_index)) {} -}; - -class ScopedDelegateOperatorProfile : public ScopedProfile { - public: - ScopedDelegateOperatorProfile(Profiler* profiler, const char* tag, - int node_index) - : ScopedProfile(profiler, tag, - Profiler::EventType::DELEGATE_OPERATOR_INVOKE_EVENT, - static_cast(node_index)) {} -}; - -class ScopedRuntimeInstrumentationProfile : public ScopedProfile { - public: - ScopedRuntimeInstrumentationProfile(Profiler* profiler, const char* tag) - : ScopedProfile( - profiler, tag, - Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, -1) {} - - void set_runtime_status(int64_t delegate_status, int64_t interpreter_status) { - if (profiler_) { - delegate_status_ = delegate_status; - interpreter_status_ = interpreter_status; - } - } - - ~ScopedRuntimeInstrumentationProfile() { - if (profiler_) { - profiler_->EndEvent(event_handle_, delegate_status_, interpreter_status_); - } - } - - private: - int64_t delegate_status_; - int64_t interpreter_status_; -}; - -} // namespace tflite - -#define TFLITE_VARNAME_UNIQ_IMPL(name, ctr) name##ctr -#define TFLITE_VARNAME_UNIQ(name, ctr) TFLITE_VARNAME_UNIQ_IMPL(name, ctr) - -#define TFLITE_SCOPED_TAGGED_DEFAULT_PROFILE(profiler, tag) \ - tflite::ScopedProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \ - (profiler), (tag)) - -#define TFLITE_SCOPED_TAGGED_OPERATOR_PROFILE(profiler, tag, node_index) \ - tflite::ScopedOperatorProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \ - (profiler), (tag), (node_index)) - -#define TFLITE_SCOPED_DELEGATE_OPERATOR_PROFILE(profiler, tag, node_index) \ - tflite::ScopedDelegateOperatorProfile TFLITE_VARNAME_UNIQ( \ - _profile_, __COUNTER__)((profiler), (tag), (node_index)) - -#define TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT( \ - profiler, tag, event_metadata1, event_metadata2) \ - do { \ - if (profiler) { \ - const auto handle = profiler->BeginEvent( \ - tag, Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, \ - event_metadata1, event_metadata2); \ - profiler->EndEvent(handle); \ - } \ - } while (false); - -#endif // TENSORFLOW_LITE_CORE_API_PROFILER_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/tensor_utils.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/tensor_utils.cc deleted file mode 100644 index 3aac16b6..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/tensor_utils.cc +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
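[Note] To illustrate the scoped-profiling helpers removed above: a Profiler subclass only has to implement BeginEvent() and EndEvent(), and the convenience macros then wrap a kernel invocation in an event whose lifetime is the enclosing scope. The logging profiler below is a sketch under that assumption, not code from this tree.

    #include <cstdint>
    #include <cstdio>

    // Sketch only: logs event boundaries instead of recording timestamps.
    class LoggingProfiler : public tflite::Profiler {
     public:
      uint32_t BeginEvent(const char* tag, EventType event_type,
                          int64_t event_metadata1,
                          int64_t /*event_metadata2*/) override {
        std::printf("begin '%s' type=%d meta=%lld handle=%u\n", tag,
                    static_cast<int>(event_type),
                    static_cast<long long>(event_metadata1), next_handle_);
        return next_handle_++;
      }
      void EndEvent(uint32_t event_handle) override {
        std::printf("end handle=%u\n", event_handle);
      }

     private:
      uint32_t next_handle_ = 0;
    };

    void InvokeNodeWithProfiling(LoggingProfiler* profiler, int node_index) {
      // The event spans the rest of this scope and ends in the destructor.
      TFLITE_SCOPED_TAGGED_OPERATOR_PROFILE(profiler, "CONV_2D", node_index);
      // ... run the kernel here ...
    }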
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/core/api/tensor_utils.h" - -#include - -#include "tensorflow/lite/c/common.h" - -namespace tflite { - -TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) { - if (!tensor->is_variable) { - return kTfLiteOk; - } - // TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it - // to the value of the buffer. - int value = 0; - if (tensor->type == kTfLiteInt8) { - value = tensor->params.zero_point; - } - // TODO(b/139446230): Provide a platform header to better handle these - // specific scenarios. -#if __ANDROID__ || defined(__x86_64__) || defined(__i386__) || \ - defined(__i386) || defined(__x86__) || defined(__X86__) || \ - defined(_X86_) || defined(_M_IX86) || defined(_M_X64) - memset(tensor->data.raw, value, tensor->bytes); -#else - char* raw_ptr = tensor->data.raw; - for (size_t i = 0; i < tensor->bytes; ++i) { - *raw_ptr = value; - raw_ptr++; - } -#endif - return kTfLiteOk; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/tensor_utils.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/tensor_utils.h deleted file mode 100644 index 9f1cf94a..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/core/api/tensor_utils.h +++ /dev/null @@ -1,28 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ -#define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ - -#include "tensorflow/lite/c/common.h" - -namespace tflite { - -// Resets a variable tensor to the default value. -TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor); - -} // namespace tflite - -#endif // TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/common.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/common.h deleted file mode 100644 index ad677ed5..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/common.h +++ /dev/null @@ -1,1037 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_ - -#ifndef ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK -#ifdef GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -#define ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK -#endif -#endif - -#include - -#include "fixedpoint/fixedpoint.h" -#include "tensorflow/lite/kernels/internal/cppmath.h" -#include "tensorflow/lite/kernels/internal/optimized/neon_check.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -constexpr int kReverseShift = -1; - -inline void GetActivationMinMax(FusedActivationFunctionType ac, - float* output_activation_min, - float* output_activation_max) { - switch (ac) { - case FusedActivationFunctionType::kNone: - *output_activation_min = std::numeric_limits::lowest(); - *output_activation_max = std::numeric_limits::max(); - break; - case FusedActivationFunctionType::kRelu: - *output_activation_min = 0.f; - *output_activation_max = std::numeric_limits::max(); - break; - case FusedActivationFunctionType::kRelu1: - *output_activation_min = -1.f; - *output_activation_max = 1.f; - break; - case FusedActivationFunctionType::kRelu6: - *output_activation_min = 0.f; - *output_activation_max = 6.f; - break; - } -} - -template -inline T ActivationFunctionWithMinMax(T x, T output_activation_min, - T output_activation_max) { - using std::max; - using std::min; - return min(max(x, output_activation_min), output_activation_max); -} - -// Legacy function, left for compatibility only. -template -float ActivationFunction(float x) { - float output_activation_min, output_activation_max; - GetActivationMinMax(Ac, &output_activation_min, &output_activation_max); - return ActivationFunctionWithMinMax(x, output_activation_min, - output_activation_max); -} - -inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size, - const float* bias_data, int array_size, - float* array_data) { - // Note: see b/132215220: in May 2019 we thought it would be OK to replace - // this with the Eigen one-liner: - // return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max). - // This turned out to severely regress performance: +4ms (i.e. 8%) on - // MobileNet v2 / 1.0 / 224. So we keep custom NEON code for now. 
- TFLITE_DCHECK_EQ((array_size % bias_size), 0); -#ifdef USE_NEON - float* array_ptr = array_data; - float* array_end_ptr = array_ptr + array_size; - const auto clamp_min_vec = vdupq_n_f32(clamp_min); - const auto clamp_max_vec = vdupq_n_f32(clamp_max); - for (; array_ptr != array_end_ptr; array_ptr += bias_size) { - int i = 0; - for (; i <= bias_size - 16; i += 16) { - auto b0 = vld1q_f32(bias_data + i); - auto b1 = vld1q_f32(bias_data + i + 4); - auto b2 = vld1q_f32(bias_data + i + 8); - auto b3 = vld1q_f32(bias_data + i + 12); - auto a0 = vld1q_f32(array_ptr + i); - auto a1 = vld1q_f32(array_ptr + i + 4); - auto a2 = vld1q_f32(array_ptr + i + 8); - auto a3 = vld1q_f32(array_ptr + i + 12); - auto x0 = vaddq_f32(a0, b0); - auto x1 = vaddq_f32(a1, b1); - auto x2 = vaddq_f32(a2, b2); - auto x3 = vaddq_f32(a3, b3); - x0 = vmaxq_f32(clamp_min_vec, x0); - x1 = vmaxq_f32(clamp_min_vec, x1); - x2 = vmaxq_f32(clamp_min_vec, x2); - x3 = vmaxq_f32(clamp_min_vec, x3); - x0 = vminq_f32(clamp_max_vec, x0); - x1 = vminq_f32(clamp_max_vec, x1); - x2 = vminq_f32(clamp_max_vec, x2); - x3 = vminq_f32(clamp_max_vec, x3); - vst1q_f32(array_ptr + i, x0); - vst1q_f32(array_ptr + i + 4, x1); - vst1q_f32(array_ptr + i + 8, x2); - vst1q_f32(array_ptr + i + 12, x3); - } - for (; i <= bias_size - 4; i += 4) { - auto b = vld1q_f32(bias_data + i); - auto a = vld1q_f32(array_ptr + i); - auto x = vaddq_f32(a, b); - x = vmaxq_f32(clamp_min_vec, x); - x = vminq_f32(clamp_max_vec, x); - vst1q_f32(array_ptr + i, x); - } - for (; i < bias_size; i++) { - array_ptr[i] = ActivationFunctionWithMinMax(array_ptr[i] + bias_data[i], - clamp_min, clamp_max); - } - } -#else // not NEON - for (int array_offset = 0; array_offset < array_size; - array_offset += bias_size) { - for (int i = 0; i < bias_size; i++) { - array_data[array_offset + i] = ActivationFunctionWithMinMax( - array_data[array_offset + i] + bias_data[i], clamp_min, clamp_max); - } - } -#endif -} - -inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp( - int32_t x, int32_t quantized_multiplier, int left_shift) { - using gemmlowp::RoundingDivideByPOT; - using gemmlowp::SaturatingRoundingDoublingHighMul; - return RoundingDivideByPOT( - SaturatingRoundingDoublingHighMul(x, quantized_multiplier), -left_shift); -} - -inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne( - int32_t x, int32_t quantized_multiplier, int left_shift) { - using gemmlowp::SaturatingRoundingDoublingHighMul; - return SaturatingRoundingDoublingHighMul(x * (1 << left_shift), - quantized_multiplier); -} - -inline int32_t MultiplyByQuantizedMultiplier(int32_t x, - int32_t quantized_multiplier, - int shift) { - using gemmlowp::RoundingDivideByPOT; - using gemmlowp::SaturatingRoundingDoublingHighMul; - int left_shift = shift > 0 ? shift : 0; - int right_shift = shift > 0 ? 
0 : -shift; - return RoundingDivideByPOT(SaturatingRoundingDoublingHighMul( - x * (1 << left_shift), quantized_multiplier), - right_shift); -} - -inline int32_t MultiplyByQuantizedMultiplier(int64_t x, - int32_t quantized_multiplier, - int shift) { - // Inputs: - // - quantized_multiplier has fixed point at bit 31 - // - shift is -31 to +7 (negative for right shift) - // - // Assumptions: The following input ranges are assumed - // - quantize_scale>=0 (the usual range is (1<<30) to (1>>31)-1) - // - scaling is chosen so final scaled result fits in int32_t - // - input x is in the range -(1<<47) <= x < (1<<47) - assert(quantized_multiplier >= 0); - assert(shift >= -31 && shift < 8); - assert(x >= -(static_cast(1) << 47) && - x < (static_cast(1) << 47)); - - int32_t reduced_multiplier = (quantized_multiplier < 0x7FFF0000) - ? ((quantized_multiplier + (1 << 15)) >> 16) - : 0x7FFF; - int total_shift = 15 - shift; - x = (x * (int64_t)reduced_multiplier) + ((int64_t)1 << (total_shift - 1)); - int32_t result = x >> total_shift; - return result; -} - -#ifdef USE_NEON -// Round uses ARM's rounding shift right. -inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows( - int32x4x4_t input_val, int32_t quantized_multiplier, int shift) { - const int left_shift = std::max(shift, 0); - const int right_shift = std::min(shift, 0); - int32x4x4_t result; - - int32x4_t multiplier_dup = vdupq_n_s32(quantized_multiplier); - int32x4_t left_shift_dup = vdupq_n_s32(left_shift); - int32x4_t right_shift_dup = vdupq_n_s32(right_shift); - - result.val[0] = - vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[0], left_shift_dup), - multiplier_dup), - right_shift_dup); - - result.val[1] = - vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[1], left_shift_dup), - multiplier_dup), - right_shift_dup); - - result.val[2] = - vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[2], left_shift_dup), - multiplier_dup), - right_shift_dup); - - result.val[3] = - vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[3], left_shift_dup), - multiplier_dup), - right_shift_dup); - - return result; -} -#endif - -template -int CountLeadingZeros(T integer_input) { - static_assert(std::is_unsigned::value, - "Only unsigned integer types handled."); -#if defined(__GNUC__) - return integer_input ? __builtin_clz(integer_input) - : std::numeric_limits::digits; -#else - if (integer_input == 0) { - return std::numeric_limits::digits; - } - - const T one_in_leading_positive = static_cast(1) - << (std::numeric_limits::digits - 1); - int leading_zeros = 0; - while (integer_input < one_in_leading_positive) { - integer_input <<= 1; - ++leading_zeros; - } - return leading_zeros; -#endif -} - -template -inline int CountLeadingSignBits(T integer_input) { - static_assert(std::is_signed::value, "Only signed integer types handled."); -#if defined(__GNUC__) && !defined(__clang__) - return integer_input ? __builtin_clrsb(integer_input) - : std::numeric_limits::digits; -#else - using U = typename std::make_unsigned::type; - return integer_input >= 0 - ? CountLeadingZeros(static_cast(integer_input)) - 1 - : integer_input != std::numeric_limits::min() - ? CountLeadingZeros(2 * static_cast(-integer_input) - 1) - : 0; -#endif -} - -// Use "count leading zeros" helper functions to do a fast Floor(log_2(x)). 
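[Note] The quantized-multiplier helpers above are easiest to follow with concrete numbers. The self-contained sketch below decomposes a real-valued rescale factor into the (quantized_multiplier, shift) pair those functions consume and applies it with plain 64-bit arithmetic, equivalent up to rounding details to what MultiplyByQuantizedMultiplier() computes. The decomposition routine is an illustrative stand-in for the one in quantization_util, which is not part of this hunk.

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Sketch: represent `scale` as multiplier * 2^(shift - 31), with multiplier
    // stored as int32_t in [2^30, 2^31), the Q31 format expected above.
    void DecomposeScale(double scale, int32_t* multiplier, int* shift) {
      const double q = std::frexp(scale, shift);                 // q in [0.5, 1)
      int64_t q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
      if (q_fixed == (1ll << 31)) {                              // rounded up to 1.0
        q_fixed /= 2;
        ++*shift;
      }
      *multiplier = static_cast<int32_t>(q_fixed);
    }

    int main() {
      const double scale = 0.0051;      // e.g. input_scale * weight_scale / output_scale
      int32_t multiplier;
      int shift;
      DecomposeScale(scale, &multiplier, &shift);
      const int32_t acc = 4096;         // some int32 accumulator value
      // Multiply in 64 bits, then a rounding right shift by (31 - shift);
      // total_shift is positive for the typical case of scale < 1.
      const int total_shift = 31 - shift;
      const int64_t prod = static_cast<int64_t>(acc) * multiplier;
      const int32_t scaled =
          static_cast<int32_t>((prod + (1ll << (total_shift - 1))) >> total_shift);
      std::printf("%d * %f ~= %d\n", acc, scale, scaled);        // ~20.9 -> 21
      return 0;
    }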
-template -inline Integer FloorLog2(Integer n) { - static_assert(std::is_integral::value, ""); - static_assert(std::is_signed::value, ""); - static_assert(sizeof(Integer) == 4 || sizeof(Integer) == 8, ""); - TFLITE_CHECK_GT(n, 0); - if (sizeof(Integer) == 4) { - return 30 - CountLeadingSignBits(n); - } else { - return 62 - CountLeadingSignBits(n); - } -} - -// generate INT16 LUT for function(), e.g., table exp(x) and 1/(1+x) used in -// softmax -// func - the function to build the LUT for (e.g exp(x)) -// min,max - table limits -// table - pointer to buffer -// num - number of elements in the LUT -inline void gen_lut(double (*func)(double), double min, double max, - int16_t* table, const int num) { - // size of table should equal to num + 1 - // last element only for slope calculation - double step = (max - min) / (num - 1); - double half_step = step / 2.0; - for (int i = 0; i < num - 1; i++) { - double sample_val = TfLiteRound(func(min + i * step) * 32768.0); - double midpoint_interp_val = - TfLiteRound((func(min + (i + 1) * step) * 32768.0 + - TfLiteRound(func(min + i * step) * 32768.0)) / - 2.0); - double midpoint_val = - TfLiteRound(func(min + i * step + half_step) * 32768.0); - double midpoint_err = midpoint_interp_val - midpoint_val; - double bias = TfLiteRound(midpoint_err / 2.0); - table[i] = std::min(std::max(sample_val - bias, -32768.0), - 32767.0); - } - table[num - 1] = std::min( - std::max(TfLiteRound(func(max) * 32768.0), -32768.0), 32767.0); -} - -// generate INT16 LUT for function(), e.g., table exp(x) and 1/(1+x) used in -// softmax -// func - the function to build the LUT for (e.g exp(x)) -// min,max - table limits -// table - pointer to buffer -// num - number of elements in the LUT -inline void gen_lut(float (*func)(float), float min, float max, int16_t* table, - const int num) { - // size of table should equal to num + 1 - // last element only for slope calculation - float step = (max - min) / (num - 1); - float half_step = step / 2.0f; - for (int i = 0; i < num - 1; i++) { - float sample_val = TfLiteRound(func(min + i * step) * 32768.0f); - float midpoint_interp_val = - TfLiteRound((func(min + (i + 1) * step) * 32768.0f + - TfLiteRound(func(min + i * step) * 32768.0f)) / - 2.0f); - float midpoint_val = - TfLiteRound(func(min + i * step + half_step) * 32768.0f); - float midpoint_err = midpoint_interp_val - midpoint_val; - float bias = TfLiteRound(midpoint_err / 2.0f); - table[i] = std::min(std::max(sample_val - bias, -32768.0f), - 32767.0f); - } - table[num - 1] = std::min( - std::max(TfLiteRound(func(max) * 32768.0f), -32768.0f), 32767.0f); -} - -// int16_t func table lookup, e.g., lookup exp() and 1/(1+x) used in softmax -inline int16_t generic_int16_table_lookup(int16_t value, const int16_t* lut) { - // 512 base value, lut[513] only for calculate slope - uint16_t index = static_cast(256 + (value >> 7)); - assert(index < 512 && "LUT index out of range."); - int16_t offset = value & 0x7f; - - // base and slope are Q0.15 - int16_t base = lut[index]; - int16_t slope = lut[index + 1] - lut[index]; - - // Q0.15 * Q0.7 = Q0.22 - // Round and convert from Q0.22 to Q0.15 - int32_t delta = (static_cast(slope) * offset + 64) >> 7; - - // Q0.15 + Q0.15 - return base + delta; -} - -// Table of sigmoid(i/24) at 0.16 format - 256 elements. - -// We use combined sigmoid and tanh look-up table, since -// tanh(x) = 2*sigmoid(2*x) -1. -// Both functions are symmetric, so the LUT table is only needed -// for the absolute value of the input. 
-static const uint16_t sigmoid_table_uint16[256] = { - 32768, 33451, 34133, 34813, 35493, 36169, 36843, 37513, 38180, 38841, 39498, - 40149, 40794, 41432, 42064, 42688, 43304, 43912, 44511, 45102, 45683, 46255, - 46817, 47369, 47911, 48443, 48964, 49475, 49975, 50464, 50942, 51409, 51865, - 52311, 52745, 53169, 53581, 53983, 54374, 54755, 55125, 55485, 55834, 56174, - 56503, 56823, 57133, 57433, 57724, 58007, 58280, 58544, 58800, 59048, 59288, - 59519, 59743, 59959, 60168, 60370, 60565, 60753, 60935, 61110, 61279, 61441, - 61599, 61750, 61896, 62036, 62172, 62302, 62428, 62549, 62666, 62778, 62886, - 62990, 63090, 63186, 63279, 63368, 63454, 63536, 63615, 63691, 63765, 63835, - 63903, 63968, 64030, 64090, 64148, 64204, 64257, 64308, 64357, 64405, 64450, - 64494, 64536, 64576, 64614, 64652, 64687, 64721, 64754, 64786, 64816, 64845, - 64873, 64900, 64926, 64950, 64974, 64997, 65019, 65039, 65060, 65079, 65097, - 65115, 65132, 65149, 65164, 65179, 65194, 65208, 65221, 65234, 65246, 65258, - 65269, 65280, 65291, 65301, 65310, 65319, 65328, 65337, 65345, 65352, 65360, - 65367, 65374, 65381, 65387, 65393, 65399, 65404, 65410, 65415, 65420, 65425, - 65429, 65433, 65438, 65442, 65445, 65449, 65453, 65456, 65459, 65462, 65465, - 65468, 65471, 65474, 65476, 65479, 65481, 65483, 65485, 65488, 65489, 65491, - 65493, 65495, 65497, 65498, 65500, 65501, 65503, 65504, 65505, 65507, 65508, - 65509, 65510, 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65517, 65518, - 65519, 65520, 65520, 65521, 65522, 65522, 65523, 65523, 65524, 65524, 65525, - 65525, 65526, 65526, 65526, 65527, 65527, 65528, 65528, 65528, 65529, 65529, - 65529, 65529, 65530, 65530, 65530, 65530, 65531, 65531, 65531, 65531, 65531, - 65532, 65532, 65532, 65532, 65532, 65532, 65533, 65533, 65533, 65533, 65533, - 65533, 65533, 65533, 65534, 65534, 65534, 65534, 65534, 65534, 65534, 65534, - 65534, 65534, 65535}; - -// TODO(b/77858996): Add these to gemmlowp. -template -IntegerType SaturatingAddNonGemmlowp(IntegerType a, IntegerType b) { - static_assert(std::is_same::value, "unimplemented"); - return a; -} - -template <> -inline std::int32_t SaturatingAddNonGemmlowp(std::int32_t a, std::int32_t b) { - std::int64_t a64 = a; - std::int64_t b64 = b; - std::int64_t sum = a64 + b64; - return static_cast(std::min( - static_cast(std::numeric_limits::max()), - std::max( - static_cast(std::numeric_limits::min()), - sum))); -} - -template -gemmlowp::FixedPoint SaturatingAddNonGemmlowp( - gemmlowp::FixedPoint a, - gemmlowp::FixedPoint b) { - return gemmlowp::FixedPoint::FromRaw( - SaturatingAddNonGemmlowp(a.raw(), b.raw())); -} - -template -IntegerType SaturatingSub(IntegerType a, IntegerType b) { - static_assert(std::is_same::value, "unimplemented"); - return a; -} - -template <> -inline std::int16_t SaturatingSub(std::int16_t a, std::int16_t b) { - std::int32_t a32 = a; - std::int32_t b32 = b; - std::int32_t diff = a32 - b32; - return static_cast( - std::min(static_cast(32767), - std::max(static_cast(-32768), diff))); -} - -template <> -inline std::int32_t SaturatingSub(std::int32_t a, std::int32_t b) { - std::int64_t a64 = a; - std::int64_t b64 = b; - std::int64_t diff = a64 - b64; - return static_cast(std::min( - static_cast(std::numeric_limits::max()), - std::max( - static_cast(std::numeric_limits::min()), - diff))); -} - -template -gemmlowp::FixedPoint SaturatingSub( - gemmlowp::FixedPoint a, - gemmlowp::FixedPoint b) { - return gemmlowp::FixedPoint::FromRaw( - SaturatingSub(a.raw(), b.raw())); -} -// End section to be moved to gemmlowp. 
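[Note] The INT16 LUT helpers above (gen_lut() and generic_int16_table_lookup()) are easiest to see end to end with a small example. The sketch below builds a 513-entry Q0.15 sigmoid table and evaluates it; the choice of domain and the mapping of the float input onto the int16 lookup argument are illustrative assumptions.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Sketch: 513 entries so the lookup can read lut[index + 1] for the slope.
    float SigmoidViaInt16Lut(float x) {
      static int16_t lut[513];
      static bool built = false;
      if (!built) {
        tflite::gen_lut([](float v) { return 1.0f / (1.0f + std::exp(-v)); },
                        -8.0f, 8.0f, lut, 513);
        built = true;
      }
      // Map [-8, 8] linearly onto the full int16 range the lookup expects.
      const float scaled = x / 8.0f * 32768.0f;
      const int16_t q = static_cast<int16_t>(
          std::max(-32768.0f, std::min(32767.0f, scaled)));
      // The lookup returns a Q0.15 value, i.e. sigmoid(x) * 32768.
      return tflite::generic_int16_table_lookup(q, lut) / 32768.0f;
    }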
- -template -IntegerType SaturatingRoundingMultiplyByPOTParam(IntegerType x, int exponent) { - if (exponent == 0) { - return x; - } - using ScalarIntegerType = - typename gemmlowp::FixedPointRawTypeTraits::ScalarRawType; - const IntegerType min = - gemmlowp::Dup(std::numeric_limits::min()); - const IntegerType max = - gemmlowp::Dup(std::numeric_limits::max()); - const int ScalarIntegerTypeBits = 8 * sizeof(ScalarIntegerType); - - const std::int32_t threshold = - ((1 << (ScalarIntegerTypeBits - 1 - exponent)) - 1); - const IntegerType positive_mask = - gemmlowp::MaskIfGreaterThan(x, gemmlowp::Dup(threshold)); - const IntegerType negative_mask = - gemmlowp::MaskIfLessThan(x, gemmlowp::Dup(-threshold)); - - IntegerType result = gemmlowp::ShiftLeft(x, exponent); - result = gemmlowp::SelectUsingMask(positive_mask, max, result); - result = gemmlowp::SelectUsingMask(negative_mask, min, result); - return result; -} - -// If we want to leave IntegerBits fixed, then multiplication -// by a power of two has to be saturating/rounding, not exact anymore. -template -gemmlowp::FixedPoint -SaturatingRoundingMultiplyByPOTParam( - gemmlowp::FixedPoint a, int exponent) { - return gemmlowp::FixedPoint::FromRaw( - SaturatingRoundingMultiplyByPOTParam(a.raw(), exponent)); -} - -// Convert int32_t multiplier to int16_t with rounding. -inline void DownScaleInt32ToInt16Multiplier(int32_t multiplier_int32_t, - int16_t* multiplier_int16_t) { - TFLITE_DCHECK_GE(multiplier_int32_t, 0); - static constexpr int32_t kRoundingOffset = 1 << 15; - if (multiplier_int32_t >= - std::numeric_limits::max() - kRoundingOffset) { - *multiplier_int16_t = std::numeric_limits::max(); - return; - } - const int32_t result = (multiplier_int32_t + kRoundingOffset) >> 16; - TFLITE_DCHECK_LE(result << 16, multiplier_int32_t + kRoundingOffset); - TFLITE_DCHECK_GT(result << 16, multiplier_int32_t - kRoundingOffset); - *multiplier_int16_t = result; - TFLITE_DCHECK_EQ(*multiplier_int16_t, result); -} - -// Minimum output bits to accommodate log of maximum input range. It actually -// does not matter if one considers, say, [-64,64] or [-64,64). -// -// For example, run this through Octave: -// [0:127; ... -// ceil(log(abs( log(2.^(0:127))+1 ))/log(2)); ... -// ceil(log(abs( log(2.^(0:127))+1 ))/log(2))] -constexpr int min_log_x_output_bits(int input_bits) { - return input_bits > 90 ? 7 - : input_bits > 44 ? 6 - : input_bits > 21 ? 5 - : input_bits > 10 ? 4 - : input_bits > 4 ? 3 - : input_bits > 1 ? 2 - : 1; -} - -// Although currently the name of this function says that it cannot handle -// values less than 1, in practice it can handle as low as 1/x_max, where -// x_max is the largest representable input. In other words, the output range -// is symmetric. -template -inline gemmlowp::FixedPoint -log_x_for_x_greater_than_or_equal_to_1_impl( - gemmlowp::FixedPoint input_val) { - // CHECK(__builtin_clz(0u) >= std::numeric_limits::digits - 1); - // CHECK(__builtin_clz(0u) <= std::numeric_limits::digits); - using FixedPoint0 = gemmlowp::FixedPoint; - // The reason for accumulating the result with an extra bit of headroom is - // that z_pow_2_adj * log_2 might be saturated, and adding num_scaled * - // recip_denom will otherwise introduce an error. 
- static constexpr int kAccumIntegerBits = OutputIntegerBits + 1; - using FixedPointAccum = gemmlowp::FixedPoint; - - const FixedPoint0 log_2 = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT( - FixedPoint0, 1488522236, std::log(2.0)); - const FixedPoint0 sqrt_sqrt_half = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT( - FixedPoint0, 1805811301, std::sqrt(std::sqrt(0.5))); - const FixedPoint0 sqrt_half = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT( - FixedPoint0, 1518500250, std::sqrt(0.5)); - const FixedPoint0 one_quarter = - GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(FixedPoint0, 536870912, 1.0 / 4.0); - - const FixedPoint0 alpha_n = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT( - FixedPoint0, 117049297, 11.0 / 240.0 * std::sqrt(std::sqrt(2.0))); - const FixedPoint0 alpha_d = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT( - FixedPoint0, 127690142, 1.0 / 20.0 * std::sqrt(std::sqrt(2.0))); - const FixedPoint0 alpha_i = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT( - FixedPoint0, 1057819769, - 2.0 / std::sqrt(std::sqrt(2.0)) - std::sqrt(std::sqrt(2.0))); - const FixedPoint0 alpha_f = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT( - FixedPoint0, 638450708, 1.0 / 4.0 * std::sqrt(std::sqrt(2.0))); - - const FixedPointAccum shifted_quarter = - gemmlowp::Rescale(one_quarter); - - // Reinterpret the input value as Q0.31, because we will figure out the - // required shift "ourselves" instead of using, say, Rescale. - FixedPoint0 z_a = FixedPoint0::FromRaw(input_val.raw()); - // z_a_pow_2 = input_integer_bits - z_a_headroom; - int z_a_headroom_plus_1 = CountLeadingZeros(static_cast(z_a.raw())); - FixedPoint0 r_a_tmp = - SaturatingRoundingMultiplyByPOTParam(z_a, (z_a_headroom_plus_1 - 1)); - const int32_t r_a_raw = - SaturatingRoundingMultiplyByPOTParam((r_a_tmp * sqrt_half).raw(), 1); - // z_pow_2_adj = max(z_pow_2_a - 0.75, z_pow_2_b - 0.25); - // z_pow_2_adj = max(InputIntegerBits - z_a_headroom_plus_1 + 0.25, - // InputIntegerBits - z_b_headroom - 0.25); - const FixedPointAccum z_a_pow_2_adj = SaturatingAddNonGemmlowp( - FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam( - InputIntegerBits - z_a_headroom_plus_1, 31 - kAccumIntegerBits)), - shifted_quarter); - - // z_b is treated like z_a, but premultiplying by sqrt(0.5). 
- FixedPoint0 z_b = z_a * sqrt_half; - int z_b_headroom = CountLeadingZeros(static_cast(z_b.raw())) - 1; - const int32_t r_b_raw = - SaturatingRoundingMultiplyByPOTParam(z_a.raw(), z_b_headroom); - const FixedPointAccum z_b_pow_2_adj = SaturatingSub( - FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam( - InputIntegerBits - z_b_headroom, 31 - kAccumIntegerBits)), - shifted_quarter); - - const FixedPoint0 r = FixedPoint0::FromRaw(std::min(r_a_raw, r_b_raw)); - const FixedPointAccum z_pow_2_adj = FixedPointAccum::FromRaw( - std::max(z_a_pow_2_adj.raw(), z_b_pow_2_adj.raw())); - - const FixedPoint0 p = gemmlowp::RoundingHalfSum(r, sqrt_sqrt_half); - FixedPoint0 q = r - sqrt_sqrt_half; - q = q + q; - - const FixedPoint0 common_sq = q * q; - const FixedPoint0 num = q * r + q * common_sq * alpha_n; - const FixedPoint0 denom_minus_one_0 = - p * (alpha_i + q + alpha_d * common_sq) + alpha_f * q; - const FixedPoint0 recip_denom = - one_over_one_plus_x_for_x_in_0_1(denom_minus_one_0); - - const FixedPointAccum num_scaled = gemmlowp::Rescale(num); - return gemmlowp::Rescale(z_pow_2_adj * log_2 + - num_scaled * recip_denom); -} - -template -inline gemmlowp::FixedPoint -log_x_for_x_greater_than_or_equal_to_1( - gemmlowp::FixedPoint input_val) { - static_assert( - OutputIntegerBits >= min_log_x_output_bits(InputIntegerBits), - "Output integer bits must be sufficient to accommodate logs of inputs."); - return log_x_for_x_greater_than_or_equal_to_1_impl( - input_val); -} - -inline int32_t GetReciprocal(int32_t x, int x_integer_digits, - int* num_bits_over_unit) { - int headroom_plus_one = CountLeadingZeros(static_cast(x)); - // This is the number of bits to the left of the binary point above 1.0. - // Consider x=1.25. In that case shifted_scale=0.8 and - // no later adjustment will be needed. - *num_bits_over_unit = x_integer_digits - headroom_plus_one; - const int32_t shifted_sum_minus_one = - static_cast((static_cast(x) << headroom_plus_one) - - (static_cast(1) << 31)); - - gemmlowp::FixedPoint shifted_scale = - gemmlowp::one_over_one_plus_x_for_x_in_0_1( - gemmlowp::FixedPoint::FromRaw(shifted_sum_minus_one)); - return shifted_scale.raw(); -} - -inline void GetInvSqrtQuantizedMultiplierExp(int32_t input, int reverse_shift, - int32_t* output_inv_sqrt, - int* output_shift) { - TFLITE_DCHECK_GE(input, 0); - if (input <= 1) { - // Handle the input value 1 separately to avoid overflow in that case - // in the general computation below (b/143972021). Also handle 0 as if it - // were a 1. 0 is an invalid input here (divide by zero) and 1 is a valid - // but rare/unrealistic input value. We can expect both to occur in some - // incompletely trained models, but probably not in fully trained models. - *output_inv_sqrt = std::numeric_limits::max(); - *output_shift = 0; - return; - } - TFLITE_DCHECK_GT(input, 1); - *output_shift = 11; - while (input >= (1 << 29)) { - input /= 4; - ++*output_shift; - } - const unsigned max_left_shift_bits = - CountLeadingZeros(static_cast(input)) - 1; - const unsigned max_left_shift_bit_pairs = max_left_shift_bits / 2; - const unsigned left_shift_bit_pairs = max_left_shift_bit_pairs - 1; - *output_shift -= left_shift_bit_pairs; - input <<= 2 * left_shift_bit_pairs; - TFLITE_DCHECK_GE(input, (1 << 27)); - TFLITE_DCHECK_LT(input, (1 << 29)); - using gemmlowp::FixedPoint; - using gemmlowp::Rescale; - using gemmlowp::SaturatingRoundingMultiplyByPOT; - // Using 3 integer bits gives us enough room for the internal arithmetic in - // this Newton-Raphson iteration. 
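[Note] The Newton-Raphson scheme used by GetInvSqrtQuantizedMultiplierExp() here is easier to verify in floating point first. The sketch below applies the same update, x <- (3*x - a*x^3) / 2, with the input assumed to be pre-normalized into roughly [0.25, 1) as the fixed-point code arranges; the function name is illustrative.

    // Float-domain sketch of the fixed-point iteration: converges to 1/sqrt(a),
    // and five iterations from x = 1 suffice for a in roughly [0.25, 1).
    double InvSqrtNewtonSketch(double a) {
      double x = 1.0;
      for (int i = 0; i < 5; ++i) {
        x = 1.5 * x - 0.5 * a * x * x * x;
      }
      return x;  // e.g. a = 0.25 -> ~2.0, a = 0.5 -> ~1.414
    }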
- using F3 = FixedPoint; - using F0 = FixedPoint; - const F3 fixedpoint_input = F3::FromRaw(input >> 1); - const F3 fixedpoint_half_input = - SaturatingRoundingMultiplyByPOT<-1>(fixedpoint_input); - const F3 fixedpoint_half_three = - GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F3, (1 << 28) + (1 << 27), 1.5); - // Newton-Raphson iteration - // Naive unoptimized starting guess: x = 1 - F3 x = F3::One(); - // Naive unoptimized number of iterations: 5 - for (int i = 0; i < 5; i++) { - const F3 x3 = Rescale<3>(x * x * x); - x = Rescale<3>(fixedpoint_half_three * x - fixedpoint_half_input * x3); - } - const F0 fixedpoint_half_sqrt_2 = - GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F0, 1518500250, std::sqrt(2.) / 2.); - x = x * fixedpoint_half_sqrt_2; - *output_inv_sqrt = x.raw(); - if (*output_shift < 0) { - *output_inv_sqrt <<= -*output_shift; - *output_shift = 0; - } - // Convert right shift (right is positive) to left shift. - *output_shift *= reverse_shift; -} - -// DO NOT USE THIS STRUCT FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING -// BROADCASTING. -// -// NdArrayDesc describes the shape and memory layout of an N-dimensional -// rectangular array of numbers. -// -// NdArrayDesc is basically identical to Dims defined in types.h. -// However, as Dims is to be deprecated, this class exists as an adaptor -// to enable simple unoptimized implementations of element-wise broadcasting -// operations. -template -struct NdArrayDesc { - // The "extent" of each dimension. Indices along dimension d must be in the - // half-open interval [0, extents[d]). - int extents[N]; - - // The number of *elements* (not bytes) between consecutive indices of each - // dimension. - int strides[N]; -}; - -// DO NOT USE THIS FUNCTION FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING -// BROADCASTING. -// -// Same as Offset(), except takes as NdArrayDesc instead of Dims. -inline int SubscriptToIndex(const NdArrayDesc<4>& desc, int i0, int i1, int i2, - int i3) { - TFLITE_DCHECK(i0 >= 0 && i0 < desc.extents[0]); - TFLITE_DCHECK(i1 >= 0 && i1 < desc.extents[1]); - TFLITE_DCHECK(i2 >= 0 && i2 < desc.extents[2]); - TFLITE_DCHECK(i3 >= 0 && i3 < desc.extents[3]); - return i0 * desc.strides[0] + i1 * desc.strides[1] + i2 * desc.strides[2] + - i3 * desc.strides[3]; -} - -inline int SubscriptToIndex(const NdArrayDesc<5>& desc, int indexes[5]) { - return indexes[0] * desc.strides[0] + indexes[1] * desc.strides[1] + - indexes[2] * desc.strides[2] + indexes[3] * desc.strides[3] + - indexes[4] * desc.strides[4]; -} - -inline int SubscriptToIndex(const NdArrayDesc<8>& desc, int indexes[8]) { - return indexes[0] * desc.strides[0] + indexes[1] * desc.strides[1] + - indexes[2] * desc.strides[2] + indexes[3] * desc.strides[3] + - indexes[4] * desc.strides[4] + indexes[5] * desc.strides[5] + - indexes[6] * desc.strides[6] + indexes[7] * desc.strides[7]; -} - -// Given the dimensions of the operands for an element-wise binary broadcast, -// adjusts them so that they can be directly iterated over with simple loops. -// Returns the adjusted dims as instances of NdArrayDesc in 'desc0_out' and -// 'desc1_out'. 'desc0_out' and 'desc1_out' cannot be nullptr. -// -// This function assumes that the two input shapes are compatible up to -// broadcasting and the shorter one has already been prepended with 1s to be the -// same length. E.g., if shape0 is (1, 16, 16, 64) and shape1 is (1, 64), -// shape1 must already have been prepended to be (1, 1, 1, 64). Recall that -// Dims refer to shapes in reverse order. 
In this case, input0_dims will be -// (64, 16, 16, 1) and input1_dims will be (64, 1, 1, 1). -// -// When two shapes are compatible up to broadcasting, for each dimension d, -// the input extents are either equal, or one of them is 1. -// -// This function performs the following for each dimension d: -// - If the extents are equal, then do nothing since the loop that walks over -// both of the input arrays is correct. -// - Otherwise, one (and only one) of the extents must be 1. Say extent0 is 1 -// and extent1 is e1. Then set extent0 to e1 and stride0 *to 0*. This allows -// array0 to be referenced *at any index* in dimension d and still access the -// same slice. -template -inline void NdArrayDescsForElementwiseBroadcast(const Dims& input0_dims, - const Dims& input1_dims, - NdArrayDesc* desc0_out, - NdArrayDesc* desc1_out) { - TFLITE_DCHECK(desc0_out != nullptr); - TFLITE_DCHECK(desc1_out != nullptr); - - // Copy dims to desc. - for (int i = 0; i < N; ++i) { - desc0_out->extents[i] = input0_dims.sizes[i]; - desc0_out->strides[i] = input0_dims.strides[i]; - desc1_out->extents[i] = input1_dims.sizes[i]; - desc1_out->strides[i] = input1_dims.strides[i]; - } - - // Walk over each dimension. If the extents are equal do nothing. - // Otherwise, set the desc with extent 1 to have extent equal to the other and - // stride 0. - for (int i = 0; i < N; ++i) { - const int extent0 = ArraySize(input0_dims, i); - const int extent1 = ArraySize(input1_dims, i); - if (extent0 != extent1) { - if (extent0 == 1) { - desc0_out->strides[i] = 0; - desc0_out->extents[i] = extent1; - } else { - TFLITE_DCHECK_EQ(extent1, 1); - desc1_out->strides[i] = 0; - desc1_out->extents[i] = extent0; - } - } - } -} - -// Copies dims to desc, calculating strides. -template -inline void CopyDimsToDesc(const RuntimeShape& input_shape, - NdArrayDesc* desc_out) { - int desc_stride = 1; - for (int i = N - 1; i >= 0; --i) { - desc_out->extents[i] = input_shape.Dims(i); - desc_out->strides[i] = desc_stride; - desc_stride *= input_shape.Dims(i); - } -} - -template -inline void NdArrayDescsForElementwiseBroadcast( - const RuntimeShape& input0_shape, const RuntimeShape& input1_shape, - NdArrayDesc* desc0_out, NdArrayDesc* desc1_out) { - TFLITE_DCHECK(desc0_out != nullptr); - TFLITE_DCHECK(desc1_out != nullptr); - - auto extended_input0_shape = RuntimeShape::ExtendedShape(N, input0_shape); - auto extended_input1_shape = RuntimeShape::ExtendedShape(N, input1_shape); - - // Copy dims to desc, calculating strides. - CopyDimsToDesc(extended_input0_shape, desc0_out); - CopyDimsToDesc(extended_input1_shape, desc1_out); - - // Walk over each dimension. If the extents are equal do nothing. - // Otherwise, set the desc with extent 1 to have extent equal to the other and - // stride 0. 
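[Note] A concrete use of the broadcast descriptors may help here: the sketch below adds a per-channel bias of shape [1, 1, 1, 4] to a [1, 2, 2, 4] tensor by letting NdArrayDescsForElementwiseBroadcast() zero out the bias strides on the broadcast dimensions, so both operands can be indexed with the same four loop variables. Shapes and buffer names are illustrative.

    #include <cstdint>

    // Sketch: element-wise add with broadcasting via the descriptors above.
    void BroadcastBiasAdd(const float* input, const float* bias, float* output) {
      const int32_t input_dims[4] = {1, 2, 2, 4};
      const int32_t bias_dims[4] = {1, 1, 1, 4};
      const tflite::RuntimeShape input_shape(4, input_dims);
      const tflite::RuntimeShape bias_shape(4, bias_dims);
      tflite::NdArrayDesc<4> input_desc, bias_desc;
      tflite::NdArrayDescsForElementwiseBroadcast(input_shape, bias_shape,
                                                  &input_desc, &bias_desc);
      // bias_desc now has stride 0 on the height and width dimensions, so the
      // same bias element is revisited for every spatial position.
      for (int b = 0; b < input_desc.extents[0]; ++b) {
        for (int y = 0; y < input_desc.extents[1]; ++y) {
          for (int x = 0; x < input_desc.extents[2]; ++x) {
            for (int c = 0; c < input_desc.extents[3]; ++c) {
              output[tflite::SubscriptToIndex(input_desc, b, y, x, c)] =
                  input[tflite::SubscriptToIndex(input_desc, b, y, x, c)] +
                  bias[tflite::SubscriptToIndex(bias_desc, b, y, x, c)];
            }
          }
        }
      }
    }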
- for (int i = 0; i < N; ++i) { - const int extent0 = extended_input0_shape.Dims(i); - const int extent1 = extended_input1_shape.Dims(i); - if (extent0 != extent1) { - if (extent0 == 1) { - desc0_out->strides[i] = 0; - desc0_out->extents[i] = extent1; - } else { - TFLITE_DCHECK_EQ(extent1, 1); - desc1_out->strides[i] = 0; - desc1_out->extents[i] = extent0; - } - } - } -} - -template -inline void NdArrayDescsForElementwiseBroadcast( - const RuntimeShape& input0_shape, const RuntimeShape& input1_shape, - const RuntimeShape& input2_shape, NdArrayDesc* desc0_out, - NdArrayDesc* desc1_out, NdArrayDesc* desc2_out) { - TFLITE_DCHECK(desc0_out != nullptr); - TFLITE_DCHECK(desc1_out != nullptr); - TFLITE_DCHECK(desc2_out != nullptr); - - auto extended_input0_shape = RuntimeShape::ExtendedShape(N, input0_shape); - auto extended_input1_shape = RuntimeShape::ExtendedShape(N, input1_shape); - auto extended_input2_shape = RuntimeShape::ExtendedShape(N, input2_shape); - - // Copy dims to desc, calculating strides. - CopyDimsToDesc(extended_input0_shape, desc0_out); - CopyDimsToDesc(extended_input1_shape, desc1_out); - CopyDimsToDesc(extended_input2_shape, desc2_out); - - // Walk over each dimension. If the extents are equal do nothing. - // Otherwise, set the desc with extent 1 to have extent equal to the other and - // stride 0. - for (int i = 0; i < N; ++i) { - const int extent0 = extended_input0_shape.Dims(i); - const int extent1 = extended_input1_shape.Dims(i); - const int extent2 = extended_input2_shape.Dims(i); - - int extent = extent0; - if (extent1 != 1) extent = extent1; - if (extent2 != 1) extent = extent2; - - TFLITE_DCHECK(extent0 == 1 || extent0 == extent); - TFLITE_DCHECK(extent1 == 1 || extent1 == extent); - TFLITE_DCHECK(extent2 == 1 || extent2 == extent); - - if (!(extent0 == extent1 && extent1 == extent2)) { - if (extent0 == 1) { - desc0_out->strides[i] = 0; - desc0_out->extents[i] = extent; - } - if (extent1 == 1) { - desc1_out->strides[i] = 0; - desc1_out->extents[i] = extent; - } - if (extent2 == 1) { - desc2_out->strides[i] = 0; - desc2_out->extents[i] = extent; - } - } - } -} - -// Detailed implementation of NDOpsHelper, the indexes must be a zero array. -// This implementation is equivalent to N nested loops. Ex, if N=4, it can be -// re-writen as: -// for (int b = 0; b < output.extents[0]; ++b) { -// for (int y = 0; y < output.extents[1]; ++y) { -// for (int x = 0; x < output.extents[2]; ++x) { -// for (int c = 0; c < output.extents[3]; ++c) { -// calc({b,y,x,c}); -// } -// } -// } -// } -template -typename std::enable_if::type NDOpsHelperImpl( - const NdArrayDesc& output, const Calc& calc, int indexes[N]) { - for (indexes[DIM] = 0; indexes[DIM] < output.extents[DIM]; ++indexes[DIM]) { - NDOpsHelperImpl(output, calc, indexes); - } -} - -template -typename std::enable_if::type NDOpsHelperImpl( - const NdArrayDesc& output, const Calc& calc, int indexes[N]) { - for (indexes[DIM] = 0; indexes[DIM] < output.extents[DIM]; ++indexes[DIM]) { - calc(indexes); - } -} - -// Execute the calc function in the innermost iteration based on the shape of -// the output. The calc function should take a single argument of type int[N]. -template -inline void NDOpsHelper(const NdArrayDesc& output, const Calc& calc) { - int indexes[N] = {0}; - NDOpsHelperImpl(output, calc, indexes); -} -// Copied from gemmlowp::RoundDown when we dropped direct dependency on -// gemmlowp. -// -// Returns the runtime argument rounded down to the nearest multiple of -// the fixed Modulus. 
-template -Integer RoundDown(Integer i) { - return i - (i % Modulus); -} - -// Copied from gemmlowp::RoundUp when we dropped direct dependency on -// gemmlowp. -// -// Returns the runtime argument rounded up to the nearest multiple of -// the fixed Modulus. -template -Integer RoundUp(Integer i) { - return RoundDown(i + Modulus - 1); -} - -// Copied from gemmlowp::CeilQuotient when we dropped direct dependency on -// gemmlowp. -// -// Returns the quotient a / b rounded up ('ceil') to the nearest integer. -template -Integer CeilQuotient(Integer a, Integer b) { - return (a + b - 1) / b; -} - -// This function is a copy of gemmlowp::HowManyThreads, copied when we dropped -// the direct dependency of internal/optimized/ on gemmlowp. -// -// It computes a reasonable number of threads to use for a GEMM of shape -// (rows, cols, depth). -// -// TODO(b/131910176): get rid of this function by switching each call site -// to its own more sensible logic for its own workload. -template -inline int LegacyHowManyThreads(int max_num_threads, int rows, int cols, - int depth) { - // Early-exit in the default case where multi-threading is disabled. - if (max_num_threads == 1) { - return 1; - } - - // Ensure that each thread has KernelRows rows to process, if at all possible. - int thread_count = std::min(max_num_threads, rows / KernelRows); - - // Limit the number of threads according to the overall size of the problem. - if (thread_count > 1) { - // Empirically determined value. - static constexpr std::uint64_t min_cubic_size_per_thread = 64 * 1024; - - // We can only multiply two out of three sizes without risking overflow - const std::uint64_t cubic_size = - std::uint64_t(rows) * std::uint64_t(cols) * std::uint64_t(depth); - - thread_count = std::min( - thread_count, static_cast(cubic_size / min_cubic_size_per_thread)); - } - - if (thread_count < 1) { - thread_count = 1; - } - - assert(thread_count > 0 && thread_count <= max_num_threads); - return thread_count; -} - -template -void optimized_ops_preload_l1_stream(const T* ptr) { -#ifdef __GNUC__ - // builtin offered by GCC-compatible compilers including clang - __builtin_prefetch(ptr, /* 0 means read */ 0, /* 0 means no locality */ 0); -#else - (void)ptr; -#endif -} - -template -void optimized_ops_preload_l1_keep(const T* ptr) { -#ifdef __GNUC__ - // builtin offered by GCC-compatible compilers including clang - __builtin_prefetch(ptr, /* 0 means read */ 0, /* 3 means high locality */ 3); -#else - (void)ptr; -#endif -} - -template -void optimized_ops_prefetch_write_l1_keep(const T* ptr) { -#ifdef __GNUC__ - // builtin offered by GCC-compatible compilers including clang - __builtin_prefetch(ptr, /* 1 means write */ 1, /* 3 means high locality */ 3); -#else - (void)ptr; -#endif -} - -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/compatibility.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/compatibility.h deleted file mode 100644 index d3ef0df1..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/compatibility.h +++ /dev/null @@ -1,117 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
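A short worked check of the RoundDown / RoundUp / CeilQuotient arithmetic quoted above; the template parameter list is written here as (int Modulus, typename Integer), which is a reconstruction and may differ in detail from the original header.

#include <cassert>

template <int Modulus, typename Integer>
Integer RoundDown(Integer i) { return i - (i % Modulus); }

template <int Modulus, typename Integer>
Integer RoundUp(Integer i) { return RoundDown<Modulus>(i + Modulus - 1); }

template <typename Integer>
Integer CeilQuotient(Integer a, Integer b) { return (a + b - 1) / b; }

int main() {
  assert(RoundDown<8>(21) == 16);
  assert(RoundUp<8>(21) == 24);
  assert(RoundUp<8>(24) == 24);      // already a multiple: unchanged
  assert(CeilQuotient(21, 8) == 3);  // ceil(21 / 8)
  return 0;
}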
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_COMPATIBILITY_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_COMPATIBILITY_H_ - -#include - -#include "tensorflow/lite/kernels/op_macros.h" - - -#ifndef TFLITE_DCHECK -#define TFLITE_DCHECK(condition) (condition) ? (void)0 : TFLITE_ASSERT_FALSE -#endif - -#ifndef TFLITE_DCHECK_EQ -#define TFLITE_DCHECK_EQ(x, y) ((x) == (y)) ? (void)0 : TFLITE_ASSERT_FALSE -#endif - -#ifndef TFLITE_DCHECK_NE -#define TFLITE_DCHECK_NE(x, y) ((x) != (y)) ? (void)0 : TFLITE_ASSERT_FALSE -#endif - -#ifndef TFLITE_DCHECK_GE -#define TFLITE_DCHECK_GE(x, y) ((x) >= (y)) ? (void)0 : TFLITE_ASSERT_FALSE -#endif - -#ifndef TFLITE_DCHECK_GT -#define TFLITE_DCHECK_GT(x, y) ((x) > (y)) ? (void)0 : TFLITE_ASSERT_FALSE -#endif - -#ifndef TFLITE_DCHECK_LE -#define TFLITE_DCHECK_LE(x, y) ((x) <= (y)) ? (void)0 : TFLITE_ASSERT_FALSE -#endif - -#ifndef TFLITE_DCHECK_LT -#define TFLITE_DCHECK_LT(x, y) ((x) < (y)) ? (void)0 : TFLITE_ASSERT_FALSE -#endif - -// TODO(ahentz): Clean up: We should stick to the DCHECK versions. -#ifndef TFLITE_CHECK -#define TFLITE_CHECK(condition) (condition) ? (void)0 : TFLITE_ABORT -#endif - -#ifndef TFLITE_CHECK_EQ -#define TFLITE_CHECK_EQ(x, y) ((x) == (y)) ? (void)0 : TFLITE_ABORT -#endif - -#ifndef TFLITE_CHECK_NE -#define TFLITE_CHECK_NE(x, y) ((x) != (y)) ? (void)0 : TFLITE_ABORT -#endif - -#ifndef TFLITE_CHECK_GE -#define TFLITE_CHECK_GE(x, y) ((x) >= (y)) ? (void)0 : TFLITE_ABORT -#endif - -#ifndef TFLITE_CHECK_GT -#define TFLITE_CHECK_GT(x, y) ((x) > (y)) ? (void)0 : TFLITE_ABORT -#endif - -#ifndef TFLITE_CHECK_LE -#define TFLITE_CHECK_LE(x, y) ((x) <= (y)) ? (void)0 : TFLITE_ABORT -#endif - -#ifndef TFLITE_CHECK_LT -#define TFLITE_CHECK_LT(x, y) ((x) < (y)) ? (void)0 : TFLITE_ABORT -#endif - -#ifndef TF_LITE_STATIC_MEMORY -// TODO(b/162019032): Consider removing these type-aliases. -//##if 0 -using int8 = std::int8_t; -using uint8 = std::uint8_t; -using int16 = std::int16_t; -using uint16 = std::uint16_t; -using int32 = std::int32_t; -using uint32 = std::uint32_t; -#endif // !defined(TF_LITE_STATIC_MEMORY) - - -// TFLITE_DEPRECATED() -// -// Duplicated from absl/base/macros.h to avoid pulling in that library. -// Marks a deprecated class, struct, enum, function, method and variable -// declarations. The macro argument is used as a custom diagnostic message (e.g. -// suggestion of a better alternative). -// -// Example: -// -// class TFLITE_DEPRECATED("Use Bar instead") Foo {...}; -// TFLITE_DEPRECATED("Use Baz instead") void Bar() {...} -// -// Every usage of a deprecated entity will trigger a warning when compiled with -// clang's `-Wdeprecated-declarations` option. This option is turned off by -// default, but the warnings will be reported by clang-tidy. 
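A minimal sketch of the expression-style check macros defined above: the ternary form keeps each macro a single void expression, so it can be dropped anywhere a statement is allowed. std::abort stands in for TFLITE_ABORT / TFLITE_ASSERT_FALSE, and the MY_ names are local placeholders.

#include <cstdio>
#include <cstdlib>

#define MY_CHECK(condition) ((condition) ? (void)0 : std::abort())
#define MY_CHECK_EQ(x, y) (((x) == (y)) ? (void)0 : std::abort())

int main() {
  MY_CHECK(2 + 2 == 4);
  MY_CHECK_EQ(sizeof(double), 8u);  // the same layout assumption IntegerFrExp makes
  std::puts("all checks passed");
  return 0;
}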
-#if defined(__clang__) && __cplusplus >= 201103L -#define TFLITE_DEPRECATED(message) __attribute__((deprecated(message))) -#endif - -#ifndef TFLITE_DEPRECATED -#define TFLITE_DEPRECATED(message) -#endif - - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_COMPATIBILITY_H_ - diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/cppmath.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/cppmath.h deleted file mode 100644 index 24a3aec8..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/cppmath.h +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ - -#include - -namespace tflite { - -#if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \ - (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO) || \ - defined(__ZEPHYR__) -#define TF_LITE_GLOBAL_STD_PREFIX -#else -#define TF_LITE_GLOBAL_STD_PREFIX std -#endif - -#define DECLARE_STD_GLOBAL_SWITCH1(tf_name, std_name) \ - template \ - inline T tf_name(const T x) { \ - return TF_LITE_GLOBAL_STD_PREFIX::std_name(x); \ - } - -DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round); - -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/max.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/max.h deleted file mode 100644 index c1810027..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/max.h +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
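The deleted cppmath.h above switches between global and std:: math functions per platform; the sketch below reproduces that pattern with stand-in names (USE_GLOBAL_CMATH is a hypothetical flag, not the original macro list).

#include <cmath>
#include <cstdio>

#if defined(USE_GLOBAL_CMATH)  // stand-in for the platform checks in cppmath.h
#define MATH_PREFIX
#else
#define MATH_PREFIX std
#endif

#define DECLARE_SWITCH1(name, std_name) \
  template <class T>                    \
  inline T name(const T x) {            \
    return MATH_PREFIX::std_name(x);    \
  }

DECLARE_SWITCH1(MyRound, round)

int main() {
  std::printf("%f\n", MyRound(2.5));  // 3.000000: rounds halves away from zero
  return 0;
}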
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ - -#include - -namespace tflite { - -#if defined(TF_LITE_USE_GLOBAL_MAX) || defined(__ZEPHYR__) -inline float TfLiteMax(const float& x, const float& y) { - return std::max(x, y); -} -#else -template -inline T TfLiteMax(const T& x, const T& y) { - return std::fmax(x, y); -} -#endif - -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/min.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/min.h deleted file mode 100644 index 62035dcc..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/min.h +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ - -#include - -namespace tflite { - -#if defined(TF_LITE_USE_GLOBAL_MIN) || defined(__ZEPHYR__) -inline float TfLiteMin(const float& x, const float& y) { - return std::min(x, y); -} -#else -template -inline T TfLiteMin(const T& x, const T& y) { - return std::fmin(x, y); -} -#endif - -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/optimized/neon_check.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/optimized/neon_check.h deleted file mode 100644 index bbf745ce..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/optimized/neon_check.h +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
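A small usage sketch of the TfLiteMax / TfLiteMin pair shown above: the generic templates defer to the fmax / fmin family, while the global-max/min builds fall back to std::max / std::min on float. The names below are local stand-ins.

#include <cmath>
#include <cstdio>

template <typename T>
T MyMin(const T& x, const T& y) { return std::fmin(x, y); }

template <typename T>
T MyMax(const T& x, const T& y) { return std::fmax(x, y); }

int main() {
  std::printf("min=%g max=%g\n", MyMin(1.5, 2.0), MyMax(1.5, 2.0));  // 1.5 2
  return 0;
}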
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ - -#if defined(__ARM_NEON__) || defined(__ARM_NEON) -#define USE_NEON -#include -#endif - -#if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON -#define USE_NEON -#include "NEON_2_SSE.h" -#endif - -// NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is -// defined, PortableSomeFunc(args) otherwise. -#ifdef USE_NEON -// Always use Neon code -#define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__) - -#else -// No NEON available: Use Portable code -#define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__) - -#endif // defined(USE_NEON) - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/portable_tensor.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/portable_tensor.h deleted file mode 100644 index 4d71c967..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/portable_tensor.h +++ /dev/null @@ -1,122 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_ - -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -inline RuntimeShape GetTensorShape(std::vector data) { - return RuntimeShape(data.size(), data.data()); -} - -// A list of tensors in a format that can be used by kernels like split and -// concatenation. -template -class VectorOfTensors { - public: - // Build with the tensors in 'tensor_list'. - VectorOfTensors(const TfLiteContext& context, - const TfLiteIntArray& tensor_list) { - int num_tensors = tensor_list.size; - - all_data_.reserve(num_tensors); - all_shape_.reserve(num_tensors); - all_shape_ptr_.reserve(num_tensors); - - for (int i = 0; i < num_tensors; ++i) { - TfLiteTensor* t = &context.tensors[tensor_list.data[i]]; - all_data_.push_back(GetTensorData(t)); - all_shape_.push_back(GetTensorShape(t)); - } - - // Taking the pointer from inside a std::vector is only OK if the vector is - // never modified, so we populate all_shape in the previous loop and then we - // are free to grab iterators here. - for (int i = 0; i < num_tensors; ++i) { - all_shape_ptr_.push_back(&all_shape_[i]); - } - } - // Return a pointer to the data pointers of all tensors in the list. For - // example: - // float* const* f = v.data(); - // f[0][1] is the second element of the first tensor. 
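A compile-time dispatch sketch of the NEON_OR_PORTABLE macro defined above: the prefix is pasted onto the function name, so every call site resolves to either the Neon or the Portable variant. The Scale functions are hypothetical examples, and NeonScale here simply forwards to the portable version.

#include <cstdio>

void PortableScale(float* x, int n, float s) {
  for (int i = 0; i < n; ++i) x[i] *= s;
}
void NeonScale(float* x, int n, float s) {
  PortableScale(x, n, s);  // stand-in: a real build would use NEON intrinsics
}

#ifdef USE_NEON
#define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__)
#else
#define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__)
#endif

int main() {
  float v[3] = {1, 2, 3};
  NEON_OR_PORTABLE(Scale, v, 3, 2.0f);
  std::printf("%g %g %g\n", v[0], v[1], v[2]);  // 2 4 6
  return 0;
}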
- T* const* data() const { return all_data_.data(); } - - // Return a pointer the shape pointers of all tensors in the list. For - // example: - // const RuntimeShape* const* d = v.dims(); - // dims[1] are the dimensions of the second tensor in the list. - const RuntimeShape* const* shapes() const { return all_shape_ptr_.data(); } - - private: - std::vector all_data_; - std::vector all_shape_; - std::vector all_shape_ptr_; -}; - -// A list of quantized tensors in a format that can be used by kernels like -// split and concatenation. -class VectorOfQuantizedTensors : public VectorOfTensors { - public: - // Build with the tensors in 'tensor_list'. - VectorOfQuantizedTensors(const TfLiteContext& context, - const TfLiteIntArray& tensor_list) - : VectorOfTensors(context, tensor_list) { - for (int i = 0; i < tensor_list.size; ++i) { - TfLiteTensor* t = &context.tensors[tensor_list.data[i]]; - zero_point_.push_back(t->params.zero_point); - scale_.push_back(t->params.scale); - } - } - - const float* scale() const { return scale_.data(); } - const int32_t* zero_point() const { return zero_point_.data(); } - - private: - std::vector zero_point_; - std::vector scale_; -}; - -// Writes randomly accessed values from `input` sequentially into `output`. -template -class SequentialTensorWriter { - public: - SequentialTensorWriter(const TfLiteTensor* input, TfLiteTensor* output) { - input_data_ = GetTensorData(input); - output_ptr_ = GetTensorData(output); - } - SequentialTensorWriter(const T* input_data, T* output_data) - : input_data_(input_data), output_ptr_(output_data) {} - - void Write(int position) { *output_ptr_++ = input_data_[position]; } - void WriteN(int position, int len) { - memcpy(output_ptr_, &input_data_[position], sizeof(T) * len); - output_ptr_ += len; - } - - private: - const T* input_data_; - T* output_ptr_; -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/quantization_util.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/quantization_util.cc deleted file mode 100644 index ed0fe439..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/quantization_util.cc +++ /dev/null @@ -1,395 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/quantization_util.h" - -#include -#include -#include - -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/cppmath.h" - -namespace tflite { - -namespace { -// These constants are used to manipulate the binary representation of doubles. 
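A stripped-down sketch of the SequentialTensorWriter pattern above: values are read from arbitrary positions of an input buffer and emitted sequentially into an output buffer. The Writer class below is a simplified stand-in, not the original template.

#include <cstdio>
#include <cstring>

template <typename T>
class Writer {
 public:
  Writer(const T* input, T* output) : input_(input), out_(output) {}
  void Write(int position) { *out_++ = input_[position]; }
  void WriteN(int position, int len) {
    std::memcpy(out_, &input_[position], sizeof(T) * len);
    out_ += len;
  }

 private:
  const T* input_;
  T* out_;
};

int main() {
  const int in[5] = {10, 20, 30, 40, 50};
  int out[3];
  Writer<int> w(in, out);
  w.Write(4);      // out[0] = 50
  w.WriteN(1, 2);  // out[1..2] = 20, 30
  std::printf("%d %d %d\n", out[0], out[1], out[2]);
  return 0;
}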
-// Double-precision binary64 floating point format is: -// Bit | 63 | 62-52 | 51-0 | -// | Sign | Exponent | Fraction | -// To avoid 64-bit integers as much as possible, I break this into high and -// low 32-bit chunks. High is: -// Bit | 31 | 30-20 | 19-0 | -// | Sign | Exponent | High Fraction | -// Low is: -// Bit | 31-0 | -// | Low Fraction | -// We then access the components through logical bit-wise operations to -// extract the parts needed, with the positions and masks derived from the -// layout shown above. -constexpr uint64_t kSignMask = 0x8000000000000000LL; -constexpr uint64_t kExponentMask = 0x7ff0000000000000LL; -constexpr int32_t kExponentShift = 52; -constexpr int32_t kExponentBias = 1023; -constexpr uint32_t kExponentIsBadNum = 0x7ff; -constexpr uint64_t kFractionMask = 0x000fffffffc00000LL; -constexpr uint32_t kFractionShift = 22; -constexpr uint32_t kFractionRoundingMask = 0x003fffff; -constexpr uint32_t kFractionRoundingThreshold = 0x00200000; -} // namespace - -void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier, - int* shift) { - if (double_multiplier == 0.) { - *quantized_multiplier = 0; - *shift = 0; - return; - } -#ifdef TFLITE_EMULATE_FLOAT - // If we're trying to avoid the use of floating-point instructions (for - // example on microcontrollers) then use an alternative implementation - // that only requires integer and bitwise operations. To enable this, you - // need to set the define during the build process for your platform. - int64_t q_fixed = IntegerFrExp(double_multiplier, shift); -#else // TFLITE_EMULATE_FLOAT - const double q = std::frexp(double_multiplier, shift); - auto q_fixed = static_cast(TfLiteRound(q * (1ll << 31))); -#endif // TFLITE_EMULATE_FLOAT - TFLITE_CHECK(q_fixed <= (1ll << 31)); - if (q_fixed == (1ll << 31)) { - q_fixed /= 2; - ++*shift; - } - TFLITE_CHECK_LE(q_fixed, std::numeric_limits::max()); - // A shift amount smaller than -31 would cause all bits to be shifted out - // and thus all results would be zero. We implement that instead with - // q_fixed==0, so as to avoid hitting issues with right-shift - // operations with shift amounts greater than 31. Note that this happens - // roughly when abs(double_multiplier) < 2^-31 and the present handling means - // that we're effectively flushing tiny double_multiplier's to zero. - // We could conceivably handle values in the range (roughly) [32, 63] - // as 'denormals' i.e. (shift==0, q_fixed < 2^30). In that point of view - // the present handling is just doing 'flush denormals to zero'. We could - // reconsider and actually generate nonzero denormals if a need arises. - if (*shift < -31) { - *shift = 0; - q_fixed = 0; - } - *quantized_multiplier = static_cast(q_fixed); -} - -void QuantizeMultiplierGreaterThanOne(double double_multiplier, - int32_t* quantized_multiplier, - int* left_shift) { - TFLITE_CHECK_GT(double_multiplier, 1.); - QuantizeMultiplier(double_multiplier, quantized_multiplier, left_shift); - TFLITE_CHECK_GE(*left_shift, 0); -} - -void QuantizeMultiplierSmallerThanOneExp(double double_multiplier, - int32_t* quantized_multiplier, - int* left_shift) { - TFLITE_CHECK_LT(double_multiplier, 1.); - TFLITE_CHECK_GT(double_multiplier, 0.); - int shift; - QuantizeMultiplier(double_multiplier, quantized_multiplier, &shift); - TFLITE_CHECK_LE(shift, 0); - *left_shift = shift; -} - -int64_t IntegerFrExp(double input, int* shift) { - // Make sure our assumptions about the double layout hold. 
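The QuantizeMultiplier code above splits a double into a Q0.31 significand and a power-of-two shift; the sketch below walks the non-emulated path for a multiplier of 0.3, using std::frexp and std::round in place of the TFLite helpers.

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  const double multiplier = 0.3;
  int shift = 0;
  const double q = std::frexp(multiplier, &shift);  // q in [0.5, 1), multiplier = q * 2^shift
  const int64_t q_fixed = static_cast<int64_t>(std::round(q * (1ll << 31)));

  // For 0.3: q = 0.6, shift = -1, q_fixed ~= 1288490189.
  std::printf("q=%f shift=%d q_fixed=%lld\n", q, shift, (long long)q_fixed);

  // Reconstruct: (q_fixed / 2^31) * 2^shift gives back ~0.3.
  const double reconstructed = (double)q_fixed / (1ll << 31) * std::ldexp(1.0, shift);
  std::printf("reconstructed=%f\n", reconstructed);
  return 0;
}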
- TFLITE_CHECK_EQ(8, sizeof(double)); - - // We want to access the bits of the input double value directly, which is - // tricky to do safely, so use a union to handle the casting. - union { - double double_value; - uint64_t double_as_uint; - } cast_union; - cast_union.double_value = input; - const uint64_t u = cast_union.double_as_uint; - - // If the bitfield is all zeros apart from the sign bit, this is a normalized - // zero value, so return standard values for this special case. - if ((u & ~kSignMask) == 0) { - *shift = 0; - return 0; - } - - // Deal with NaNs and Infs, which are always indicated with a fixed pattern in - // the exponent, and distinguished by whether the fractions are zero or - // non-zero. - const uint32_t exponent_part = ((u & kExponentMask) >> kExponentShift); - if (exponent_part == kExponentIsBadNum) { - *shift = std::numeric_limits::max(); - if (u & kFractionMask) { - // NaN, so just return zero (with the exponent set to INT_MAX). - return 0; - } else { - // Infinity, so return +/- INT_MAX. - if (u & kSignMask) { - return std::numeric_limits::min(); - } else { - return std::numeric_limits::max(); - } - } - } - - // The shift is fairly easy to extract from the high bits of the double value, - // just by masking it out and applying a bias. The std::frexp() implementation - // always returns values between 0.5 and 1.0 though, whereas the exponent - // assumes 1.0 to 2.0 is the standard range, so I add on one to match that - // interface. - *shift = (exponent_part - kExponentBias) + 1; - - // There's an implicit high bit in the double format definition, so make sure - // we include that at the top, and then reconstruct the rest of the fractional - // value from the remaining fragments. - int64_t fraction = 0x40000000 + ((u & kFractionMask) >> kFractionShift); - - // We're cutting off some bits at the bottom, so to exactly match the standard - // frexp implementation here we'll apply rounding by adding one to the least - // significant bit of the result if the discarded portion is over half of the - // maximum. - if ((u & kFractionRoundingMask) > kFractionRoundingThreshold) { - fraction += 1; - } - // Negate the fraction if the sign bit was set. - if (u & kSignMask) { - fraction *= -1; - } - - return fraction; -} - -double DoubleFromFractionAndShift(int64_t fraction, int shift) { - union { - double double_value; - uint64_t double_as_uint; - } result; - - // Detect NaNs and infinities. - if (shift == std::numeric_limits::max()) { - if (fraction == 0) { - return std::numeric_limits::quiet_NaN(); - } else if (fraction > 0) { - return std::numeric_limits::infinity(); - } else { - return -std::numeric_limits::infinity(); - } - } - - // Return a normalized zero for a zero fraction. - if (fraction == 0) { - result.double_as_uint = 0; - return result.double_value; - } - - bool is_negative = (fraction < 0); - int64_t encoded_fraction = is_negative ? -fraction : fraction; - int64_t encoded_shift = (shift - 1); - while (encoded_fraction < 0x40000000) { - encoded_fraction *= 2; - encoded_shift -= 1; - } - while (encoded_fraction > 0x80000000) { - encoded_fraction /= 2; - encoded_shift += 1; - } - encoded_fraction -= 0x40000000; - if (encoded_shift < -1022) { - encoded_shift = -1023; - } else if (encoded_shift > 1022) { - encoded_shift = 1023; - } - encoded_shift += kExponentBias; - uint64_t encoded_sign = is_negative ? 
kSignMask : 0; - result.double_as_uint = encoded_sign | (encoded_shift << kExponentShift) | - (encoded_fraction << kFractionShift); - return result.double_value; -} - -double IntegerDoubleMultiply(double a, double b) { - int a_shift; - const int64_t a_fraction = IntegerFrExp(a, &a_shift); - int b_shift; - const int64_t b_fraction = IntegerFrExp(b, &b_shift); - // Detect NaNs and infinities. - if (a_shift == std::numeric_limits::max() || - (b_shift == std::numeric_limits::max())) { - return std::numeric_limits::quiet_NaN(); - } - const int result_shift = a_shift + b_shift + 1; - const int64_t result_fraction = (a_fraction * b_fraction) >> 32; - return DoubleFromFractionAndShift(result_fraction, result_shift); -} - -int IntegerDoubleCompare(double a, double b) { - int a_shift; - const int64_t a_fraction = IntegerFrExp(a, &a_shift); - int b_shift; - const int64_t b_fraction = IntegerFrExp(b, &b_shift); - - // Detect NaNs and infinities. - if (a_shift == std::numeric_limits::max() || - (b_shift == std::numeric_limits::max())) { - return 1; - } - - if ((a_fraction == 0) && (b_fraction < 0)) { - return 1; - } else if ((a_fraction < 0) && (b_fraction == 0)) { - return -1; - } else if (a_shift < b_shift) { - return -1; - } else if (a_shift > b_shift) { - return 1; - } else if (a_fraction < b_fraction) { - return -1; - } else if (a_fraction > b_fraction) { - return 1; - } else { - return 0; - } -} - -void PreprocessSoftmaxScaling(double beta, double input_scale, - int input_integer_bits, - int32_t* quantized_multiplier, int* left_shift) { - // If the overall multiplier (input and beta) is large, then exp() of an - // input difference of 1 scaled by this will be large. In other words, we - // can cap the multiplier and know that, when it is used, the output will be - // (round to) zero wherever the input is not at the maximum value. - - // If the overall scale is less than one, and input_integer_bits=0, then the - // result is double equivalent of Q0.31 (actually with more precision). Thus - // this generates a Q(input_integer_bits).(31-input_integer_bits) - // representation. -#ifdef TFLITE_EMULATE_FLOAT - const double input_beta = IntegerDoubleMultiply(beta, input_scale); - int shift; - int64_t fraction = IntegerFrExp(input_beta, &shift); - shift += (31 - input_integer_bits); - double input_beta_real_multiplier = - DoubleFromFractionAndShift(fraction, shift); - if (IntegerDoubleCompare(input_beta_real_multiplier, (1ll << 31) - 1.0) > 0) { - input_beta_real_multiplier = (1ll << 31) - 1.0; - } -#else // TFLITE_EMULATE_FLOAT - const double input_beta_real_multiplier = std::min( - beta * input_scale * (1 << (31 - input_integer_bits)), (1ll << 31) - 1.0); -#endif // TFLITE_EMULATE_FLOAT - - QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier, - quantized_multiplier, left_shift); -} - -void PreprocessLogSoftmaxScalingExp(double beta, double input_scale, - int input_integer_bits, - int32_t* quantized_multiplier, - int* left_shift, - int32_t* reverse_scaling_divisor, - int* reverse_scaling_left_shift) { - PreprocessSoftmaxScaling(beta, input_scale, input_integer_bits, - quantized_multiplier, left_shift); - - // Also calculate what amounts to the inverse scaling factor for the input. 
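The IntegerFrExp / DoubleFromFractionAndShift pair above leans on the binary64 bit layout documented at the top of the file; this standalone snippet pulls the same sign / exponent / fraction fields out of a double (memcpy replaces the union, but the masks and shifts follow the documented layout).

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const double x = -6.25;  // -1.5625 * 2^2
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));

  const uint64_t sign = bits >> 63;                         // 1 for negative
  const int exponent = (int)((bits >> 52) & 0x7ff) - 1023;  // remove the bias
  const uint64_t fraction = bits & 0x000fffffffffffffULL;   // 52 stored bits

  // Expect sign=1, exponent=2, and a fraction of .5625 after the implicit leading 1.
  std::printf("sign=%llu exponent=%d fraction=0x%013llx\n",
              (unsigned long long)sign, exponent, (unsigned long long)fraction);
  return 0;
}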
- const double real_reverse_scaling_divisor = - (1 << (31 - *left_shift)) / static_cast(*quantized_multiplier); - tflite::QuantizeMultiplierSmallerThanOneExp(real_reverse_scaling_divisor, - reverse_scaling_divisor, - reverse_scaling_left_shift); -} - -int CalculateInputRadius(int input_integer_bits, int input_left_shift, - int total_signed_bits) { -#ifdef TFLITE_EMULATE_FLOAT - int64_t result = (1 << input_integer_bits) - 1; - result <<= (total_signed_bits - input_integer_bits); - result >>= input_left_shift; - return result; -#else // TFLITE_EMULATE_FLOAT - const double max_input_rescaled = - 1.0 * ((1 << input_integer_bits) - 1) * - (1ll << (total_signed_bits - input_integer_bits)) / - (1ll << input_left_shift); - // Tighten bound using floor. Suppose that we could use the exact value. - // After scaling the difference, the result would be at the maximum. Thus we - // must ensure that our value has lower magnitude. - return static_cast(std::floor(max_input_rescaled)); -#endif // TFLITE_EMULATE_FLOAT -} - -void NudgeQuantizationRange(const float min, const float max, - const int quant_min, const int quant_max, - float* nudged_min, float* nudged_max, - float* nudged_scale) { - // This code originates from tensorflow/core/kernels/fake_quant_ops_functor.h. - const float quant_min_float = static_cast(quant_min); - const float quant_max_float = static_cast(quant_max); - *nudged_scale = (max - min) / (quant_max_float - quant_min_float); - const float zero_point_from_min = quant_min_float - min / *nudged_scale; - uint16_t nudged_zero_point; - if (zero_point_from_min < quant_min_float) { - nudged_zero_point = static_cast(quant_min); - } else if (zero_point_from_min > quant_max_float) { - nudged_zero_point = static_cast(quant_max); - } else { - nudged_zero_point = static_cast(TfLiteRound(zero_point_from_min)); - } - *nudged_min = (quant_min_float - nudged_zero_point) * (*nudged_scale); - *nudged_max = (quant_max_float - nudged_zero_point) * (*nudged_scale); -} - -void FakeQuantizeArray(const float nudged_scale, const float nudged_min, - const float nudged_max, const float* input_data, - float* output_data, const float size) { - // This code originates from tensorflow/core/kernels/fake_quant_ops_functor.h. - const float inv_nudged_scale = 1.0f / nudged_scale; - - for (int i = 0; i < size; i++) { - const float src_val = input_data[i]; - const float clamped = std::min(nudged_max, std::max(nudged_min, src_val)); - const float clamped_shifted = clamped - nudged_min; - const float dst_val = - TfLiteRound(clamped_shifted * inv_nudged_scale) * nudged_scale + - nudged_min; - output_data[i] = dst_val; - } -} - -bool CheckedLog2(const float x, int* log2_result) { - // Using TfLiteRound instead of std::round and std::log instead of - // std::log2 to work around these functions being missing in a toolchain - // used in some TensorFlow tests as of May 2018. 
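A worked numeric pass through the range-nudging arithmetic in NudgeQuantizationRange above: quantizing the real range [-1.0, 2.0] to [0, 255] yields an integer zero point of 85, so real 0.0 lands exactly on a grid value. Plain std::round stands in for TfLiteRound.

#include <cmath>
#include <cstdio>

int main() {
  const float min = -1.0f, max = 2.0f;
  const float qmin = 0.0f, qmax = 255.0f;

  const float scale = (max - min) / (qmax - qmin);        // 3/255 ~= 0.01176
  const float zero_from_min = qmin - min / scale;         // 85.0
  const int zero_point = (int)std::round(zero_from_min);  // in range, so no clamping

  const float nudged_min = (qmin - zero_point) * scale;   // -1.0
  const float nudged_max = (qmax - zero_point) * scale;   //  2.0
  std::printf("scale=%f zero_point=%d nudged=[%f, %f]\n",
              scale, zero_point, nudged_min, nudged_max);
  return 0;
}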
- const float x_log2 = std::log(x) * (1.0f / std::log(2.0f)); - const float x_log2_rounded = TfLiteRound(x_log2); - const float x_log2_fracpart = x_log2 - x_log2_rounded; - - *log2_result = static_cast(x_log2_rounded); - return std::abs(x_log2_fracpart) < 1e-3f; -} - -void QuantizeMultiplierArray(const double* effective_scales, size_t size, - int32_t* effective_scale_significand, - int* effective_shift) { - for (size_t i = 0; i < size; ++i) { - QuantizeMultiplier(effective_scales[i], &effective_scale_significand[i], - &effective_shift[i]); - } -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/quantization_util.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/quantization_util.h deleted file mode 100644 index 0ee914b0..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/quantization_util.h +++ /dev/null @@ -1,292 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_QUANTIZATION_UTIL_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_QUANTIZATION_UTIL_H_ - -#include -#include -#include - -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/cppmath.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -// Given the min and max values of a float array, return -// reasonable quantization parameters to use for this array. -template -QuantizationParams ChooseQuantizationParams(double rmin, double rmax, - bool narrow_range) { - const T qmin = std::numeric_limits::min() + (narrow_range ? 1 : 0); - const T qmax = std::numeric_limits::max(); - const double qmin_double = qmin; - const double qmax_double = qmax; - // 0 should always be a representable value. Let's assume that the initial - // min,max range contains 0. - TFLITE_CHECK_LE(rmin, 0.); - TFLITE_CHECK_GE(rmax, 0.); - if (rmin == rmax) { - // Special case where the min,max range is a point. Should be {0}. - TFLITE_CHECK_EQ(rmin, 0.); - TFLITE_CHECK_EQ(rmax, 0.); - QuantizationParams quantization_params; - quantization_params.zero_point = 0; - quantization_params.scale = 0.; - return quantization_params; - } - - // General case. - // - // First determine the scale. - const double scale = (rmax - rmin) / (qmax_double - qmin_double); - - // Zero-point computation. - // First the initial floating-point computation. The zero-point can be - // determined from solving an affine equation for any known pair - // (real value, corresponding quantized value). - // We know two such pairs: (rmin, qmin) and (rmax, qmax). - // The arithmetic error on the zero point computed from either pair - // will be roughly machine_epsilon * (sum of absolute values of terms) - // so we want to use the variant that adds the smaller terms. 
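A quick check of the power-of-two detection logic used by CheckedLog2 above: 0.25 is exactly 2^-2 and passes, while 0.3 rounds to the same exponent but fails the tolerance test. std::round replaces TfLiteRound in this sketch.

#include <cmath>
#include <cstdio>

bool MyCheckedLog2(float x, int* log2_result) {
  const float x_log2 = std::log(x) * (1.0f / std::log(2.0f));
  const float x_log2_rounded = std::round(x_log2);
  *log2_result = (int)x_log2_rounded;
  return std::abs(x_log2 - x_log2_rounded) < 1e-3f;
}

int main() {
  int e = 0;
  std::printf("0.25 -> %d (e=%d)\n", MyCheckedLog2(0.25f, &e), e);  // 1 (e=-2)
  std::printf("0.30 -> %d (e=%d)\n", MyCheckedLog2(0.30f, &e), e);  // 0 (e=-2)
  return 0;
}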
- const double zero_point_from_min = qmin_double - rmin / scale; - const double zero_point_from_max = qmax_double - rmax / scale; - const double zero_point_from_min_error = - std::abs(qmin_double) + std::abs(rmin / scale); - const double zero_point_from_max_error = - std::abs(qmax_double) + std::abs(rmax / scale); - - const double zero_point_double = - zero_point_from_min_error < zero_point_from_max_error - ? zero_point_from_min - : zero_point_from_max; - - // Now we need to nudge the zero point to be an integer - // (our zero points are integer, and this is motivated by the requirement - // to be able to represent the real value "0" exactly as a quantized value, - // which is required in multiple places, for example in Im2col with SAME - // padding). - T nudged_zero_point = 0; - if (zero_point_double < qmin_double) { - nudged_zero_point = qmin; - } else if (zero_point_double > qmax_double) { - nudged_zero_point = qmax; - } else { - nudged_zero_point = static_cast(round(zero_point_double)); - } - // The zero point should always be in the range of quantized value, - // [qmin, qmax]. - TFLITE_CHECK_GE(nudged_zero_point, qmin); - TFLITE_CHECK_LE(nudged_zero_point, qmax); - - // Finally, store the result nudged quantization params. - QuantizationParams quantization_params; - quantization_params.zero_point = nudged_zero_point; - quantization_params.scale = scale; - return quantization_params; -} - -template -QuantizationParams ChooseQuantizationParams(double rmin, double rmax) { - return ChooseQuantizationParams(rmin, rmax, false); -} - -// Converts a floating-point number to an integer. For all inputs x where -// static_cast(x) is legal according to the C++ standard, the result -// is identical to that cast (i.e. the result is x with its fractional part -// truncated whenever that is representable as IntOut). -// -// static_cast would cause undefined behavior for the following cases, which -// have well-defined behavior for this function: -// -// 1. If x is NaN, the result is zero. -// -// 2. If the truncated form of x is above the representable range of IntOut, -// the result is std::numeric_limits::max(). -// -// 3. If the truncated form of x is below the representable range of IntOut, -// the result is std::numeric_limits::min(). -// -// Note that cases #2 and #3 cover infinities as well as finite numbers. -// -// The range of FloatIn must include the range of IntOut, otherwise -// the results are undefined. -// TODO(sfeuz): Replace by absl::SafeCast once available. -template -IntOut SafeCast(FloatIn x) { - static_assert(!std::numeric_limits::is_integer, - "FloatIn is integer"); - static_assert(std::numeric_limits::is_integer, - "IntOut is not integer"); - static_assert(std::numeric_limits::radix == 2, "IntOut is base 2"); - - // Special case NaN, for which the logic below doesn't work. - if (std::isnan(x)) { - return 0; - } - - // Negative values all clip to zero for unsigned results. - if (!std::numeric_limits::is_signed && x < 0) { - return 0; - } - - // Handle infinities. - if (std::isinf(x)) { - return x < 0 ? std::numeric_limits::min() - : std::numeric_limits::max(); - } - - // Set exp such that x == f * 2^exp for some f with |f| in [0.5, 1.0), - // unless x is zero in which case exp == 0. Note that this implies that the - // magnitude of x is strictly less than 2^exp. - int exp = 0; - std::frexp(x, &exp); - - // Let N be the number of non-sign bits in the representation of IntOut. 
If - // the magnitude of x is strictly less than 2^N, the truncated version of x - // is representable as IntOut. The only representable integer for which this - // is not the case is kMin for signed types (i.e. -2^N), but that is covered - // by the fall-through below. - if (exp <= std::numeric_limits::digits) { - return x; - } - - // Handle numbers with magnitude >= 2^N. - return x < 0 ? std::numeric_limits::min() - : std::numeric_limits::max(); -} - -// Decompose a double multiplier into a Q0.31 int32 representation of its -// significand, and shift representation of NEGATIVE its exponent --- -// this is intended as a RIGHT-shift. -// -// Restricted to the case where the multiplier < 1 (and non-negative). -void QuantizeMultiplierSmallerThanOneExp(double double_multiplier, - int32_t* quantized_multiplier, - int* left_shift); - -// Decompose a double multiplier into a Q0.31 int32 representation of its -// significand, and shift representation of its exponent. -// -// Restricted to the case where the multiplier > 1. -void QuantizeMultiplierGreaterThanOne(double double_multiplier, - int32_t* quantized_multiplier, - int* left_shift); - -// Decompose a double multiplier into a Q0.31 int32 representation of its -// significand, and shift representation of its exponent. -// -// Handles an arbitrary positive multiplier. The 'shift' output-value is -// basically the 'floating-point exponent' of the multiplier: -// Negative for a right-shift (when the multiplier is <1), positive for a -// left-shift (when the multiplier is >1) -void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier, - int* shift); - -// Splits a double input value into a returned fraction, and a shift value from -// the exponent, using only bitwise and integer operations to support -// microcontrollers and other environments without floating-point support. -// -// This is designed to be a replacement for how std::frexp() is used within the -// QuantizeMultiplier() function, and so has a different signature than the -// standard version, returning a 64-bit integer rather than a double. This -// result has a maximum value of 1<<31, with the fraction expressed as a -// proportion of that maximum. -// -// std::frexp() returns NaNs and infinities unmodified, but since we're -// returning integers that can't represent those values, instead we return -// a shift of std::numeric_limits::max() for all bad numbers, with an int64 -// result of 0 for NaNs, std:numeric_limits::max() for +INFINITY, and -// std::numeric_limits::min() for -INFINITY. Denormalized inputs will -// result in return values that end up truncating some bits at the end, -// reflecting the loss of precision inherent in denormalization. -int64_t IntegerFrExp(double input, int* shift); - -// Converts an integer fraction in the format produced by IntegerFrExp (where -// 0x40000000 is 1.0) and an exponent shift (between -1022 and +1022) into an -// IEEE binary64 double format result. The implementation uses only integer and -// bitwise operators, so no floating point hardware support or emulation is -// needed. This is here so quantized operations can run non-time-critical -// preparation calculations on microcontrollers and other platforms without -// float support. -double DoubleFromFractionAndShift(int64_t fraction, int shift); - -// Performs a multiplication of two numbers in double format, using only integer -// and bitwise instructions. 
This is aimed at supporting housekeeping functions -// for quantized operations on microcontrollers without floating-point hardware. -double IntegerDoubleMultiply(double a, double b); - -// Returns -1 if a is less than b, 0 if a and b are equal, and +1 if a is -// greater than b. It is implemented using only integer and logical instructions -// so that it can be easily run on microcontrollers for quantized operations. -int IntegerDoubleCompare(double a, double b); - -// This first creates a multiplier in a double equivalent of -// Q(input_integer_bits).(31-input_integer_bits) representation, with extra -// precision in the double's fractional bits. It then splits the result into -// significand and exponent. -void PreprocessSoftmaxScaling(double beta, double input_scale, - int input_integer_bits, - int32_t* quantized_multiplier, int* left_shift); -// Like PreprocessSoftmaxScaling, but inverse scaling factors also calculated. -void PreprocessLogSoftmaxScalingExp(double beta, double input_scale, - int input_integer_bits, - int32_t* quantized_multiplier, - int* left_shift, - int32_t* reverse_scaling_divisor, - int* reverse_scaling_left_shift); -// Calculate the largest input that will result in a within-bounds intermediate -// result within MultiplyByQuantizedMultiplierGreaterThanOne. In other words, -// it must not overflow before we reduce the value by multiplication by the -// input multiplier. The negative radius is used as the minimum difference in -// Softmax. -int CalculateInputRadius(int input_integer_bits, int input_left_shift, - int total_signed_bits = 31); - -// Nudges a min/max quantization range to ensure zero is zero. -// Gymnastics with nudged zero point is to ensure that real zero maps to -// an integer, which is required for e.g. zero-padding in convolutional layers. -// Outputs nudged_min, nudged_max, nudged_scale. -void NudgeQuantizationRange(const float min, const float max, - const int quant_min, const int quant_max, - float* nudged_min, float* nudged_max, - float* nudged_scale); - -// Fake quantizes (quantizes and dequantizes) input_data using the scale, -// nudged_min, and nudged_max from NudgeQuantizationRange. This matches the code -// in TensorFlow's FakeQuantizeWithMinMaxVarsFunctor. -void FakeQuantizeArray(const float nudged_scale, const float nudged_min, - const float nudged_max, const float* input_data, - float* output_data, const float size); - -// If x is approximately a power of two (with any positive or negative -// exponent), stores that exponent (i.e. log2(x)) in *log2_result, otherwise -// returns false. -bool CheckedLog2(const float x, int* log2_result); - -// Decomposes an array of double multipliers into a Q0.31 int32 representation -// of its significand, and shift representation of its exponent. -// -// Handles an arbitrary multiplier. 
The 'shift' output-value is -// basically the 'floating-point exponent' of the multiplier: -// Negative for a right-shift (when the multiplier is <1), positive for a -// left-shift (when the multiplier is >1) -void QuantizeMultiplierArray(const double* effective_scales, size_t size, - int32_t* effective_scale_significand, - int* effective_shift); - -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_QUANTIZATION_UTIL_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/add.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/add.h deleted file mode 100644 index 3da76d88..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/add.h +++ /dev/null @@ -1,446 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_ - -#include "fixedpoint/fixedpoint.h" -#include "tensorflow/lite/kernels/internal/common.h" - -namespace tflite { - -namespace reference_ops { - -template -inline void Add(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const T* input1_data, - const RuntimeShape& input2_shape, const T* input2_data, - const RuntimeShape& output_shape, T* output_data) { - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - output_data[i] = ActivationFunctionWithMinMax( - input1_data[i] + input2_data[i], params.quantized_activation_min, - params.quantized_activation_max); - } -} - -inline void Add(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const float* input1_data, - const RuntimeShape& input2_shape, const float* input2_data, - const RuntimeShape& output_shape, float* output_data) { - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - for (int i = 0; i < flat_size; i++) { - auto x = input1_data[i] + input2_data[i]; - output_data[i] = ActivationFunctionWithMinMax( - x, params.float_activation_min, params.float_activation_max); - } -} - -// Element-wise add that can often be used for inner loop of broadcast add as -// well as the non-broadcast add. - -// This function is used for 8-bit as well as for 16-bit, but the accumulator -// is 32-bit for both cases. The overflow does not happen due to the -// choice of the shift (20 or 15, accordingly - see add.cc for more comments). 
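A back-of-the-envelope check of the overflow claim in the comment above, for the 8-bit path: an input plus its offset stays below 2^9, the left shift is 20, so each shifted operand stays below 2^29 and the worst-case sum below 2^30, well inside a 32-bit accumulator. The shift value of 20 comes from the comment; the rest is plain arithmetic.

#include <cstdint>
#include <cstdio>

int main() {
  const int32_t max_val_plus_offset = 255 + 255;         // < 2^9
  const int32_t shifted = max_val_plus_offset << 20;     // < 2^29
  const int64_t worst_case_sum = (int64_t)shifted * 2;   // < 2^30
  std::printf("shifted=%d worst_case_sum=%lld int32 max=%lld\n",
              shifted, (long long)worst_case_sum, (long long)INT32_MAX);
  return 0;
}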
-template -inline void AddElementwise(int size, const ArithmeticParams& params, - const T* input1_data, const T* input2_data, - T* output_data) { - TFLITE_DCHECK_GT(params.input1_offset, -std::numeric_limits::max()); - TFLITE_DCHECK_GT(params.input2_offset, -std::numeric_limits::max()); - TFLITE_DCHECK_LT(params.input1_offset, std::numeric_limits::max()); - TFLITE_DCHECK_LT(params.input2_offset, std::numeric_limits::max()); - - for (int i = 0; i < size; ++i) { - const int32_t input1_val = params.input1_offset + input1_data[i]; - const int32_t input2_val = params.input2_offset + input2_data[i]; - const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, params.input2_shift); - const int32_t raw_sum = scaled_input1_val + scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sum, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[i] = static_cast(clamped_output); - } -} - -// Scalar-broadcast add that can be used for inner loop of more general -// broadcast add, so that, for example, scalar-broadcast with batch will still -// be fast. -inline void AddScalarBroadcast(int size, const ArithmeticParams& params, - uint8_t input1_data, const uint8_t* input2_data, - uint8_t* output_data) { - TFLITE_DCHECK_GT(params.input1_offset, -256); - TFLITE_DCHECK_GT(params.input2_offset, -256); - TFLITE_DCHECK_LT(params.input1_offset, 256); - TFLITE_DCHECK_LT(params.input2_offset, 256); - - const int32_t input1_val = params.input1_offset + input1_data; - const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, params.input1_shift); - for (int i = 0; i < size; ++i) { - const int32_t input2_val = params.input2_offset + input2_data[i]; - const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, params.input2_shift); - const int32_t raw_sum = scaled_input1_val + scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sum, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[i] = static_cast(clamped_output); - } -} - -inline void Add(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const uint8_t* input1_data, - const RuntimeShape& input2_shape, const uint8_t* input2_data, - const RuntimeShape& output_shape, uint8_t* output_data) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - - TFLITE_DCHECK_GT(params.input1_offset, -256); - TFLITE_DCHECK_GT(params.input2_offset, 
-256); - TFLITE_DCHECK_LT(params.input1_offset, 256); - TFLITE_DCHECK_LT(params.input2_offset, 256); - AddElementwise(flat_size, params, input1_data, input2_data, output_data); -} - -inline void AddGeneralParamScale(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int16_t* input1_data, - const RuntimeShape& input2_shape, - const int16_t* input2_data, - const RuntimeShape& output_shape, - int16_t* output_data) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - - int max_value = std::numeric_limits::max(); - - TFLITE_DCHECK_GT(params.input1_offset, -max_value); - TFLITE_DCHECK_GT(params.input2_offset, -max_value); - TFLITE_DCHECK_LT(params.input1_offset, max_value); - TFLITE_DCHECK_LT(params.input2_offset, max_value); - AddElementwise(flat_size, params, input1_data, input2_data, output_data); -} - -inline void Add(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const int16_t* input1_data, - const RuntimeShape& input2_shape, const int16_t* input2_data, - const RuntimeShape& output_shape, int16_t* output_data, - bool pot_scale = true) { - if (!pot_scale) { - AddGeneralParamScale(params, input1_shape, input1_data, input2_shape, - input2_data, output_shape, output_data); - return; - } - - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - - const int input1_shift = params.input1_shift; - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - const int16_t output_activation_min = params.quantized_activation_min; - const int16_t output_activation_max = params.quantized_activation_max; - - TFLITE_DCHECK(input1_shift == 0 || params.input2_shift == 0); - TFLITE_DCHECK_LE(input1_shift, 0); - TFLITE_DCHECK_LE(params.input2_shift, 0); - const int16_t* not_shift_input = - input1_shift == 0 ? input1_data : input2_data; - const int16_t* shift_input = input1_shift == 0 ? input2_data : input1_data; - const int input_right_shift = - input1_shift == 0 ? -params.input2_shift : -input1_shift; - - for (int i = 0; i < flat_size; i++) { - // F0 uses 0 integer bits, range [-1, 1]. - using F0 = gemmlowp::FixedPoint; - - F0 input_ready_scaled = F0::FromRaw(not_shift_input[i]); - F0 scaled_input = F0::FromRaw( - gemmlowp::RoundingDivideByPOT(shift_input[i], input_right_shift)); - F0 result = gemmlowp::SaturatingAdd(scaled_input, input_ready_scaled); - const int16_t raw_output = result.raw(); - const int16_t clamped_output = std::min( - output_activation_max, std::max(output_activation_min, raw_output)); - output_data[i] = clamped_output; - } -} - -inline void BroadcastAdd4DSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const float* input1_data, - const RuntimeShape& input2_shape, - const float* input2_data, - const RuntimeShape& output_shape, - float* output_data) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. 
The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. - for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - output_data[Offset(extended_output_shape, b, y, x, c)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, b, y, x, c)] + - input2_data[SubscriptToIndex(desc2, b, y, x, c)], - params.float_activation_min, params.float_activation_max); - } - } - } - } -} - -inline void BroadcastAdd4DSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int32_t* input1_data, - const RuntimeShape& input2_shape, - const int32_t* input2_data, - const RuntimeShape& output_shape, - int32_t* output_data) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. - for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - output_data[Offset(extended_output_shape, b, y, x, c)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, b, y, x, c)] + - input2_data[SubscriptToIndex(desc2, b, y, x, c)], - params.quantized_activation_min, - params.quantized_activation_max); - } - } - } - } -} - -// This function is used for 8-bit as well as for 16-bit, but the accumulator -// is 32-bit for both cases. The overflow does not happen due to the -// choice of the shift (20 or 15, accordingly - see add.cc for more comments). -template -inline void BroadcastAdd4DSlow( - const ArithmeticParams& params, const RuntimeShape& input1_shape, - const T* input1_data, const RuntimeShape& input2_shape, - const T* input2_data, const RuntimeShape& output_shape, T* output_data) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. 
- // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. - for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - const int32_t input1_val = - params.input1_offset + - input1_data[SubscriptToIndex(desc1, b, y, x, c)]; - const int32_t input2_val = - params.input2_offset + - input2_data[SubscriptToIndex(desc2, b, y, x, c)]; - const int32_t shifted_input1_val = - input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = - input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, - params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, - params.input2_shift); - const int32_t raw_sum = scaled_input1_val + scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sum, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[Offset(extended_output_shape, b, y, x, c)] = - static_cast(clamped_output); - } - } - } - } -} - -inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params, - const RuntimeShape& unswitched_input1_shape, - const uint8_t* unswitched_input1_data, - const RuntimeShape& unswitched_input2_shape, - const uint8_t* unswitched_input2_data, - const RuntimeShape& output_shape, - uint8_t* output_data) { - ArithmeticParams switched_params = unswitched_params; - switched_params.input1_offset = unswitched_params.input2_offset; - switched_params.input1_multiplier = unswitched_params.input2_multiplier; - switched_params.input1_shift = unswitched_params.input2_shift; - switched_params.input2_offset = unswitched_params.input1_offset; - switched_params.input2_multiplier = unswitched_params.input1_multiplier; - switched_params.input2_shift = unswitched_params.input1_shift; - - const bool use_unswitched = - unswitched_params.broadcast_category == - tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast; - - const ArithmeticParams& params = - use_unswitched ? unswitched_params : switched_params; - const uint8_t* input1_data = - use_unswitched ? unswitched_input1_data : unswitched_input2_data; - const uint8_t* input2_data = - use_unswitched ? unswitched_input2_data : unswitched_input1_data; - - // Fivefold nested loops. The second input resets its position for each - // iteration of the second loop. The first input resets its position at the - // beginning of the fourth loop. The innermost loop is an elementwise add of - // sections of the arrays. - uint8_t* output_data_ptr = output_data; - const uint8_t* input1_data_ptr = input1_data; - const uint8_t* input2_data_reset = input2_data; - // In the fivefold pattern, y0, y2 and y4 are not broadcast, and so shared - // between input shapes. y3 for input 1 is always broadcast, and so the - // dimension there is 1, whereas optionally y1 might be broadcast for input 2. - // Put another way, - // input1.shape.FlatSize = y0 * y1 * y2 * y4, - // input2.shape.FlatSize = y0 * y2 * y3 * y4. 
- int y0 = params.broadcast_shape[0]; - int y1 = params.broadcast_shape[1]; - int y2 = params.broadcast_shape[2]; - int y3 = params.broadcast_shape[3]; - int y4 = params.broadcast_shape[4]; - if (y4 > 1) { - // General fivefold pattern, with y4 > 1 so there is a non-broadcast inner - // dimension. - for (int i0 = 0; i0 < y0; ++i0) { - const uint8_t* input2_data_ptr; - for (int i1 = 0; i1 < y1; ++i1) { - input2_data_ptr = input2_data_reset; - for (int i2 = 0; i2 < y2; ++i2) { - for (int i3 = 0; i3 < y3; ++i3) { - AddElementwise(y4, params, input1_data_ptr, input2_data_ptr, - output_data_ptr); - input2_data_ptr += y4; - output_data_ptr += y4; - } - // We have broadcast y4 of input1 data y3 times, and now move on. - input1_data_ptr += y4; - } - } - // We have broadcast y2*y3*y4 of input2 data y1 times, and now move on. - input2_data_reset = input2_data_ptr; - } - } else { - // Special case of y4 == 1, in which the innermost loop is a single element - // and can be combined with the next (y3) as an inner broadcast. - // - // Note that this handles the case of pure scalar broadcast when - // y0 == y1 == y2 == 1. With low overhead it handles cases such as scalar - // broadcast with batch (as y2 > 1). - // - // NOTE The process is the same as the above general case except simplified - // for y4 == 1 and the loop over y3 is contained within the - // AddScalarBroadcast function. - for (int i0 = 0; i0 < y0; ++i0) { - const uint8_t* input2_data_ptr; - for (int i1 = 0; i1 < y1; ++i1) { - input2_data_ptr = input2_data_reset; - for (int i2 = 0; i2 < y2; ++i2) { - AddScalarBroadcast(y3, params, *input1_data_ptr, input2_data_ptr, - output_data_ptr); - input2_data_ptr += y3; - output_data_ptr += y3; - input1_data_ptr += 1; - } - } - input2_data_reset = input2_data_ptr; - } - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/arg_min_max.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/arg_min_max.h deleted file mode 100644 index e6f34fd7..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/arg_min_max.h +++ /dev/null @@ -1,68 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
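Side note on the BroadcastAddFivefold routine in the add.h hunk above (a standalone sketch, not taken from the deleted file; the extents 2, 3, 4, 5, 8 are made up for illustration): the FlatSize relations stated in its comment can be checked directly.

#include <cassert>
#include <cstdio>

int main() {
  // y0, y2 and y4 are shared between the inputs; y3 is broadcast for input1
  // and y1 is (optionally) broadcast for input2, as described in the hunk.
  const int y0 = 2, y1 = 3, y2 = 4, y3 = 5, y4 = 8;
  const int input1_size = y0 * y1 * y2 * y4;       // 192 elements
  const int input2_size = y0 * y2 * y3 * y4;       // 320 elements
  const int output_size = y0 * y1 * y2 * y3 * y4;  // 960 elements
  assert(output_size == input1_size * y3);  // each input1 run is reused y3 times
  assert(output_size == input2_size * y1);  // each input2 block is reused y1 times
  std::printf("%d %d %d\n", input1_size, input2_size, output_size);
  return 0;
}

With y4 > 1 the innermost work is a contiguous AddElementwise over y4 elements; the y4 == 1 special case in the hunk collapses that into the scalar-broadcast path.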
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ - -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -template -void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data, - const T3* input2_data, const RuntimeShape& output_shape, - T2* output_data, const Cmp& cmp) { - TFLITE_DCHECK_GT(input1_shape.DimensionsCount(), 0); - TFLITE_DCHECK_EQ(input1_shape.DimensionsCount() - 1, - output_shape.DimensionsCount()); - int axis = input2_data[0]; - if (axis < 0) { - axis += input1_shape.DimensionsCount(); - } - const int axis_size = input1_shape.Dims(axis); - - int outer_size = 1; - for (int i = 0; i < axis; ++i) { - TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i)); - outer_size *= input1_shape.Dims(i); - } - - int inner_size = 1; - const int dims_count = input1_shape.DimensionsCount(); - for (int i = axis + 1; i < dims_count; ++i) { - TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i - 1)); - inner_size *= input1_shape.Dims(i); - } - for (int outer = 0; outer < outer_size; ++outer) { - for (int inner = 0; inner < inner_size; ++inner) { - auto min_max_value = input1_data[outer * axis_size * inner_size + inner]; - T2 min_max_index = 0; - for (int i = 1; i < axis_size; ++i) { - const auto& curr_value = - input1_data[(outer * axis_size + i) * inner_size + inner]; - if (cmp(curr_value, min_max_value)) { - min_max_value = curr_value; - min_max_index = static_cast(i); - } - } - output_data[outer * inner_size + inner] = min_max_index; - } - } -} -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/binary_function.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/binary_function.h deleted file mode 100644 index 1711940c..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/binary_function.h +++ /dev/null @@ -1,80 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_ - -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -// Also appears to duplicate MinimumMaximum. -// -// R: Result type. T1: Input 1 type. T2: Input 2 type. 
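Side note on the ArgMinMax hunk above (a standalone sketch with made-up data, not taken from the deleted header): the reduction visits outer_size * inner_size positions and scans axis_size candidates at each one. For a [2, 3] input reduced over axis 1, outer_size is 2, axis_size is 3 and inner_size is 1; std::greater gives arg-max and std::less would give arg-min, matching the Cmp template parameter.

#include <cstdio>
#include <functional>

int main() {
  const float data[2][3] = {{0.1f, 0.9f, 0.3f}, {2.0f, -1.0f, 0.5f}};
  std::greater<float> cmp;  // arg-max
  for (int outer = 0; outer < 2; ++outer) {  // outer_size
    float best_value = data[outer][0];
    int best_index = 0;
    for (int i = 1; i < 3; ++i) {            // axis_size
      if (cmp(data[outer][i], best_value)) {
        best_value = data[outer][i];
        best_index = i;
      }
    }
    std::printf("row %d -> index %d\n", outer, best_index);  // prints 1, then 0
  }
  return 0;
}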
-template -inline void BroadcastBinaryFunction4DSlow( - const RuntimeShape& unextended_input1_shape, const T1* input1_data, - const RuntimeShape& unextended_input2_shape, const T2* input2_data, - const RuntimeShape& unextended_output_shape, R* output_data, - R (*func)(T1, T2)) { - TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); - const RuntimeShape output_shape = - RuntimeShape::ExtendedShape(4, unextended_output_shape); - - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, - unextended_input2_shape, &desc1, &desc2); - - for (int b = 0; b < output_shape.Dims(0); ++b) { - for (int y = 0; y < output_shape.Dims(1); ++y) { - for (int x = 0; x < output_shape.Dims(2); ++x) { - for (int c = 0; c < output_shape.Dims(3); ++c) { - auto out_idx = Offset(output_shape, b, y, x, c); - auto in1_idx = SubscriptToIndex(desc1, b, y, x, c); - auto in2_idx = SubscriptToIndex(desc2, b, y, x, c); - auto in1_val = input1_data[in1_idx]; - auto in2_val = input2_data[in2_idx]; - output_data[out_idx] = func(in1_val, in2_val); - } - } - } - } -} - -// R: Result type. T1: Input 1 type. T2: Input 2 type. -template -inline void BinaryFunction(const RuntimeShape& input1_shape, - const T1* input1_data, - const RuntimeShape& input2_shape, - const T2* input2_data, - const RuntimeShape& output_shape, R* output_data, - R (*func)(T1, T2)) { - const int flat_size = - MatchingFlatSize(input1_shape, input2_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - output_data[i] = func(input1_data[i], input2_data[i]); - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/ceil.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/ceil.h deleted file mode 100644 index 66d1dc35..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/ceil.h +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
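Side note on the BinaryFunction hunk above (a standalone sketch, not taken from the deleted header; ElementwiseBinary and GreaterThan are illustrative names): the kernel's only contract is a flat loop that applies a plain function pointer R (*func)(T1, T2) to matching elements.

#include <cstdio>

static bool GreaterThan(float lhs, float rhs) { return lhs > rhs; }

template <typename R, typename T1, typename T2>
void ElementwiseBinary(int flat_size, const T1* in1, const T2* in2, R* out,
                       R (*func)(T1, T2)) {
  for (int i = 0; i < flat_size; ++i) out[i] = func(in1[i], in2[i]);
}

int main() {
  const float a[3] = {1.f, 5.f, 2.f};
  const float b[3] = {3.f, 4.f, 2.f};
  bool out[3];
  ElementwiseBinary(3, a, b, out, GreaterThan);
  std::printf("%d %d %d\n", out[0], out[1], out[2]);  // 0 1 0
  return 0;
}

The broadcast variant in the same hunk performs the identical per-element call, only with indices produced by NdArrayDescsForElementwiseBroadcast instead of a flat counter.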
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ - -#include - -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -inline void Ceil(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - - for (int i = 0; i < flat_size; ++i) { - output_data[i] = std::ceil(input_data[i]); - } -} - -} // namespace reference_ops -} // namespace tflite -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/comparisons.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/comparisons.h deleted file mode 100644 index 6344bdc7..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/comparisons.h +++ /dev/null @@ -1,280 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -template -inline bool EqualFn(T lhs, T rhs) { - return lhs == rhs; -} - -template -inline bool NotEqualFn(T lhs, T rhs) { - return lhs != rhs; -} - -template -inline bool GreaterFn(T lhs, T rhs) { - return lhs > rhs; -} -template -inline bool GreaterEqualFn(T lhs, T rhs) { - return lhs >= rhs; -} -template -inline bool LessFn(T lhs, T rhs) { - return lhs < rhs; -} -template -inline bool LessEqualFn(T lhs, T rhs) { - return lhs <= rhs; -} - -template -using ComparisonFn = bool (*)(T, T); - -template F> -inline void ComparisonImpl( - const ComparisonParams& op_params, const RuntimeShape& input1_shape, - const T* input1_data, const RuntimeShape& input2_shape, - const T* input2_data, const RuntimeShape& output_shape, bool* output_data) { - const int64_t flatsize = - MatchingFlatSize(input1_shape, input2_shape, output_shape); - for (int64_t i = 0; i < flatsize; ++i) { - output_data[i] = F(input1_data[i], input2_data[i]); - } -} - -template F> -inline void Comparison(const ComparisonParams& op_params, - const RuntimeShape& input1_shape, - const float* input1_data, - const RuntimeShape& input2_shape, - const float* input2_data, - const RuntimeShape& output_shape, bool* output_data) { - ComparisonImpl(op_params, input1_shape, input1_data, input2_shape, - input2_data, output_shape, output_data); -} - -template F> -inline void ComparisonWithScaling( - const ComparisonParams& op_params, const RuntimeShape& input1_shape, - const T* input1_data, const RuntimeShape& input2_shape, - const T* input2_data, const RuntimeShape& output_shape, bool* output_data) { - int left_shift = op_params.left_shift; - int32_t input1_offset = op_params.input1_offset; - int32_t input1_multiplier = op_params.input1_multiplier; - int input1_shift = op_params.input1_shift; - int32_t input2_offset = op_params.input2_offset; - int32_t input2_multiplier = op_params.input2_multiplier; - int input2_shift = op_params.input2_shift; - - const int64_t flatsize = - MatchingFlatSize(input1_shape, input2_shape, output_shape); - for (int64_t i = 0; i < flatsize; ++i) { - const int32_t input1_val = input1_offset + input1_data[i]; - const int32_t input2_val = input2_offset + input2_data[i]; - const int32_t shifted_input1_val = input1_val * (1 << left_shift); - const int32_t shifted_input2_val = input2_val * (1 << left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, input1_multiplier, input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, input2_multiplier, input2_shift); - output_data[i] = F(scaled_input1_val, scaled_input2_val); - } -} - -struct BroadcastComparison4DSlowCommon { - const RuntimeShape output_shape; - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; -}; - -inline BroadcastComparison4DSlowCommon BroadcastComparison4DSlowPreprocess( - const RuntimeShape& unextended_input1_shape, - const RuntimeShape& unextended_input2_shape, - const RuntimeShape& unextended_output_shape) { - TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); - 
TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, - unextended_input2_shape, &desc1, &desc2); - return {RuntimeShape::ExtendedShape(4, unextended_output_shape), desc1, - desc2}; -} - -template F> -inline void BroadcastComparison4DSlowImpl( - const ComparisonParams& op_params, - const RuntimeShape& unextended_input1_shape, const T* input1_data, - const RuntimeShape& unextended_input2_shape, const T* input2_data, - const RuntimeShape& unextended_output_shape, bool* output_data) { - const BroadcastComparison4DSlowCommon dims = - BroadcastComparison4DSlowPreprocess(unextended_input1_shape, - unextended_input2_shape, - unextended_output_shape); - - for (int b = 0; b < dims.output_shape.Dims(0); ++b) { - for (int y = 0; y < dims.output_shape.Dims(1); ++y) { - for (int x = 0; x < dims.output_shape.Dims(2); ++x) { - for (int c = 0; c < dims.output_shape.Dims(3); ++c) { - output_data[Offset(dims.output_shape, b, y, x, c)] = - F(input1_data[SubscriptToIndex(dims.desc1, b, y, x, c)], - input2_data[SubscriptToIndex(dims.desc2, b, y, x, c)]); - } - } - } - } -} - -template F> -inline void BroadcastComparison4DSlow(const ComparisonParams& op_params, - const RuntimeShape& input1_shape, - const float* input1_data, - const RuntimeShape& input2_shape, - const float* input2_data, - const RuntimeShape& output_shape, - bool* output_data) { - BroadcastComparison4DSlowImpl(op_params, input1_shape, input1_data, - input2_shape, input2_data, - output_shape, output_data); -} - -template F> -inline void BroadcastComparison4DSlowWithScaling( - const ComparisonParams& op_params, - const RuntimeShape& unextended_input1_shape, const T* input1_data, - const RuntimeShape& unextended_input2_shape, const T* input2_data, - const RuntimeShape& unextended_output_shape, bool* output_data) { - const BroadcastComparison4DSlowCommon dims = - BroadcastComparison4DSlowPreprocess(unextended_input1_shape, - unextended_input2_shape, - unextended_output_shape); - - int left_shift = op_params.left_shift; - int32_t input1_offset = op_params.input1_offset; - int32_t input1_multiplier = op_params.input1_multiplier; - int input1_shift = op_params.input1_shift; - int32_t input2_offset = op_params.input2_offset; - int32_t input2_multiplier = op_params.input2_multiplier; - int input2_shift = op_params.input2_shift; - - for (int b = 0; b < dims.output_shape.Dims(0); ++b) { - for (int y = 0; y < dims.output_shape.Dims(1); ++y) { - for (int x = 0; x < dims.output_shape.Dims(2); ++x) { - for (int c = 0; c < dims.output_shape.Dims(3); ++c) { - const int32_t input1_val = - input1_offset + - input1_data[SubscriptToIndex(dims.desc1, b, y, x, c)]; - const int32_t input2_val = - input2_offset + - input2_data[SubscriptToIndex(dims.desc2, b, y, x, c)]; - const int32_t shifted_input1_val = input1_val * (1 << left_shift); - const int32_t shifted_input2_val = input2_val * (1 << left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, input1_multiplier, input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, input2_multiplier, input2_shift); - output_data[Offset(dims.output_shape, b, y, x, c)] = - F(scaled_input1_val, scaled_input2_val); - } - } - } - } -} - -#define TFLITE_COMPARISON_OP(name) \ - inline void name(const ComparisonParams& 
op_params, \ - const RuntimeShape& input1_shape, const float* input1_data, \ - const RuntimeShape& input2_shape, const float* input2_data, \ - const RuntimeShape& output_shape, bool* output_data) { \ - Comparison(op_params, input1_shape, input1_data, input2_shape, \ - input2_data, output_shape, output_data); \ - } \ - template \ - inline void name##NoScaling( \ - const ComparisonParams& op_params, const RuntimeShape& input1_shape, \ - const T* input1_data, const RuntimeShape& input2_shape, \ - const T* input2_data, const RuntimeShape& output_shape, \ - bool* output_data) { \ - ComparisonImpl(op_params, input1_shape, input1_data, \ - input2_shape, input2_data, output_shape, \ - output_data); \ - } \ - template \ - inline void name##WithScaling( \ - const ComparisonParams& op_params, const RuntimeShape& input1_shape, \ - const T* input1_data, const RuntimeShape& input2_shape, \ - const T* input2_data, const RuntimeShape& output_shape, \ - bool* output_data) { \ - ComparisonWithScaling(op_params, input1_shape, input1_data, \ - input2_shape, input2_data, \ - output_shape, output_data); \ - } \ - template \ - inline void Broadcast4DSlow##name##NoScaling( \ - const ComparisonParams& op_params, const RuntimeShape& input1_shape, \ - const T* input1_data, const RuntimeShape& input2_shape, \ - const T* input2_data, const RuntimeShape& output_shape, \ - bool* output_data) { \ - BroadcastComparison4DSlowImpl( \ - op_params, input1_shape, input1_data, input2_shape, input2_data, \ - output_shape, output_data); \ - } \ - inline void Broadcast4DSlow##name( \ - const ComparisonParams& op_params, const RuntimeShape& input1_shape, \ - const float* input1_data, const RuntimeShape& input2_shape, \ - const float* input2_data, const RuntimeShape& output_shape, \ - bool* output_data) { \ - BroadcastComparison4DSlow(op_params, input1_shape, input1_data, \ - input2_shape, input2_data, \ - output_shape, output_data); \ - } \ - template \ - inline void Broadcast4DSlow##name##WithScaling( \ - const ComparisonParams& op_params, const RuntimeShape& input1_shape, \ - const T* input1_data, const RuntimeShape& input2_shape, \ - const T* input2_data, const RuntimeShape& output_shape, \ - bool* output_data) { \ - BroadcastComparison4DSlowWithScaling( \ - op_params, input1_shape, input1_data, input2_shape, input2_data, \ - output_shape, output_data); \ - } -TFLITE_COMPARISON_OP(Equal); -TFLITE_COMPARISON_OP(NotEqual); -TFLITE_COMPARISON_OP(Greater); -TFLITE_COMPARISON_OP(GreaterEqual); -TFLITE_COMPARISON_OP(Less); -TFLITE_COMPARISON_OP(LessEqual); -#undef TFLITE_COMPARISON_OP - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/concatenation.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/concatenation.h deleted file mode 100644 index 998bb093..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/concatenation.h +++ /dev/null @@ -1,139 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
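Side note on the ComparisonWithScaling and BroadcastComparison4DSlowWithScaling hunks above (a simplified sketch with made-up quantization parameters, not the deleted code): the kernels compare two quantized tensors with different scales by first bringing both onto a common grid using pre-computed multipliers and shifts, entirely in integer arithmetic. In real-value terms that is equivalent to the float computation below.

#include <cstdint>
#include <cstdio>

int main() {
  const float scale1 = 0.5f;   const int32_t zero_point1 = 10;
  const float scale2 = 0.25f;  const int32_t zero_point2 = 0;
  const uint8_t q1 = 20;  // real value: 0.5f * (20 - 10) = 5.0
  const uint8_t q2 = 30;  // real value: 0.25f * (30 - 0) = 7.5
  const float real1 = scale1 * static_cast<float>(q1 - zero_point1);
  const float real2 = scale2 * static_cast<float>(q2 - zero_point2);
  std::printf("Greater? %d\n", real1 > real2 ? 1 : 0);  // 0, since 5.0 < 7.5
  return 0;
}

The integer path in the hunk reaches the same answer by shifting each (offset + quantized) value left and rescaling it with MultiplyByQuantizedMultiplierSmallerThanOneExp before the comparison.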
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_ - -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/cppmath.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { -namespace reference_ops { - -template -inline void Concatenation(const ConcatenationParams& params, - const RuntimeShape* const* input_shapes, - const Scalar* const* input_data, - const RuntimeShape& output_shape, - Scalar* output_data) { - int axis = params.axis; - int inputs_count = params.inputs_count; - const int concat_dimensions = output_shape.DimensionsCount(); - TFLITE_DCHECK_LT(axis, concat_dimensions); - - int64_t concat_size = 0; - for (int i = 0; i < inputs_count; i++) { - TFLITE_DCHECK_EQ(input_shapes[i]->DimensionsCount(), concat_dimensions); - for (int j = 0; j < concat_dimensions; j++) { - if (j != axis) { - MatchingDim(*input_shapes[i], j, output_shape, j); - } - } - concat_size += input_shapes[i]->Dims(axis); - } - TFLITE_DCHECK_EQ(concat_size, output_shape.Dims(axis)); - int64_t outer_size = 1; - for (int i = 0; i < axis; ++i) { - outer_size *= output_shape.Dims(i); - } - // For all input arrays, - // FlatSize() = outer_size * Dims(axis) * base_inner_size; - int64_t base_inner_size = 1; - for (int i = axis + 1; i < concat_dimensions; ++i) { - base_inner_size *= output_shape.Dims(i); - } - - Scalar* output_ptr = output_data; - for (int k = 0; k < outer_size; k++) { - for (int i = 0; i < inputs_count; ++i) { - const int copy_size = input_shapes[i]->Dims(axis) * base_inner_size; - const Scalar* input_ptr = input_data[i] + k * copy_size; - memcpy(output_ptr, input_ptr, copy_size * sizeof(Scalar)); - output_ptr += copy_size; - } - } -} - -// TODO(b/174275780): The quantized implementation of concatentation isn't fully -// quantized as it takes scale as a floating point value. This should be fixed -// when optimizng this routine further. 
-inline void ConcatenationWithScaling(const ConcatenationParams& params, - const RuntimeShape* const* input_shapes, - const uint8_t* const* input_data, - const RuntimeShape& output_shape, - uint8_t* output_data) { - int axis = params.axis; - const int32_t* input_zeropoint = params.input_zeropoint; - const float* input_scale = params.input_scale; - int inputs_count = params.inputs_count; - const int32_t output_zeropoint = params.output_zeropoint; - const float output_scale = params.output_scale; - - const int concat_dimensions = output_shape.DimensionsCount(); - TFLITE_DCHECK_LT(axis, concat_dimensions); - - int64_t concat_size = 0; - for (int i = 0; i < inputs_count; i++) { - TFLITE_DCHECK_EQ(input_shapes[i]->DimensionsCount(), concat_dimensions); - for (int j = 0; j < concat_dimensions; j++) { - if (j != axis) { - MatchingDim(*input_shapes[i], j, output_shape, j); - } - } - concat_size += input_shapes[i]->Dims(axis); - } - TFLITE_DCHECK_EQ(concat_size, output_shape.Dims(axis)); - int64_t outer_size = 1; - for (int i = 0; i < axis; ++i) { - outer_size *= output_shape.Dims(i); - } - // For all input arrays, - // FlatSize() = outer_size * Dims(axis) * base_inner_size; - int64_t base_inner_size = 1; - for (int i = axis + 1; i < concat_dimensions; ++i) { - base_inner_size *= output_shape.Dims(i); - } - - const float inverse_output_scale = 1.f / output_scale; - uint8_t* output_ptr = output_data; - for (int k = 0; k < outer_size; k++) { - for (int i = 0; i < inputs_count; ++i) { - const int copy_size = input_shapes[i]->Dims(axis) * base_inner_size; - const uint8_t* input_ptr = input_data[i] + k * copy_size; - if (input_zeropoint[i] == output_zeropoint && - input_scale[i] == output_scale) { - memcpy(output_ptr, input_ptr, copy_size); - } else { - const float scale = input_scale[i] * inverse_output_scale; - const float bias = -input_zeropoint[i] * scale; - for (int j = 0; j < copy_size; ++j) { - const int32_t value = static_cast(tflite::TfLiteRound( - input_ptr[j] * scale + bias)) + - output_zeropoint; - output_ptr[j] = static_cast( - std::max(std::min(255, value), 0)); - } - } - output_ptr += copy_size; - } - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/conv.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/conv.h deleted file mode 100644 index 5a6369d8..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/conv.h +++ /dev/null @@ -1,264 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
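Side note on the Concatenation hunk above (a standalone sketch with made-up shapes, not the deleted code): for each of the outer_size slices, the kernel copies one contiguous run per input whose length is Dims(axis) * base_inner_size. Concatenating [2, 3] and [2, 5] along axis 1 gives outer_size = 2, base_inner_size = 1 and per-input copy sizes 3 and 5.

#include <cstdio>
#include <cstring>

int main() {
  const float a[2][3] = {{1, 2, 3}, {4, 5, 6}};
  const float b[2][5] = {{10, 11, 12, 13, 14}, {15, 16, 17, 18, 19}};
  float out[2][8];
  float* out_ptr = &out[0][0];
  for (int k = 0; k < 2; ++k) {  // outer_size
    std::memcpy(out_ptr, a[k], 3 * sizeof(float));  // copy_size for input a
    out_ptr += 3;
    std::memcpy(out_ptr, b[k], 5 * sizeof(float));  // copy_size for input b
    out_ptr += 5;
  }
  std::printf("%g %g\n", out[0][3], out[1][7]);  // 10 and 19
  return 0;
}

ConcatenationWithScaling follows the same copy pattern but requantizes each input to the output's scale and zero point when they differ, which is why it can only memcpy in the matching-parameters case.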
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_ - -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, - const float* input_data, const RuntimeShape& filter_shape, - const float* filter_data, const RuntimeShape& bias_shape, - const float* bias_data, const RuntimeShape& output_shape, - float* output_data, const RuntimeShape& im2col_shape, - float* im2col_data) { - const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int dilation_width_factor = params.dilation_width_factor; - const int dilation_height_factor = params.dilation_height_factor; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - const float output_activation_min = params.float_activation_min; - const float output_activation_max = params.float_activation_max; - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - - (void)im2col_data; // only used in optimized code. - (void)im2col_shape; // only used in optimized code. - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); - const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); - if (bias_data) { - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - } - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - const int in_y_origin = (out_y * stride_height) - pad_height; - for (int out_x = 0; out_x < output_width; ++out_x) { - const int in_x_origin = (out_x * stride_width) - pad_width; - for (int out_channel = 0; out_channel < output_depth; ++out_channel) { - float total = 0.f; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - const int in_y = in_y_origin + dilation_height_factor * filter_y; - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - const int in_x = in_x_origin + dilation_width_factor * filter_x; - - // Zero padding by omitting the areas outside the image. 
- const bool is_point_inside_image = - (in_x >= 0) && (in_x < input_width) && (in_y >= 0) && - (in_y < input_height); - - if (!is_point_inside_image) { - continue; - } - - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - float input_value = input_data[Offset(input_shape, batch, in_y, - in_x, in_channel)]; - float filter_value = filter_data[Offset( - filter_shape, out_channel, filter_y, filter_x, in_channel)]; - total += (input_value * filter_value); - } - } - } - float bias_value = 0.0f; - if (bias_data) { - bias_value = bias_data[out_channel]; - } - output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = - ActivationFunctionWithMinMax(total + bias_value, - output_activation_min, - output_activation_max); - } - } - } - } -} - -inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, - const uint8_t* input_data, const RuntimeShape& filter_shape, - const uint8_t* filter_data, const RuntimeShape& bias_shape, - const int32_t* bias_data, const RuntimeShape& output_shape, - uint8_t* output_data, const RuntimeShape& im2col_shape, - uint8_t* im2col_data, void* cpu_backend_context) { - (void)cpu_backend_context; // only used in optimized code. - (void)im2col_data; // only used in optimized code. - (void)im2col_shape; // only used in optimized code. - const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int dilation_width_factor = params.dilation_width_factor; - const int dilation_height_factor = params.dilation_height_factor; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - const int32_t input_offset = params.input_offset; - const int32_t filter_offset = params.weights_offset; - const int32_t output_offset = params.output_offset; - const int32_t output_multiplier = params.output_multiplier; - const int output_shift = params.output_shift; - const int32_t output_activation_min = params.quantized_activation_min; - const int32_t output_activation_max = params.quantized_activation_max; - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); - const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); - if (bias_data) { - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - } - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - const int in_y_origin = (out_y * stride_height) - pad_height; - for (int out_x = 0; out_x < output_width; ++out_x) { - const int in_x_origin = (out_x * stride_width) - pad_width; - for (int out_channel = 0; out_channel < output_depth; ++out_channel) { - int32_t acc = 0; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - const int in_y = in_y_origin + dilation_height_factor * filter_y; - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - const int in_x = in_x_origin + dilation_width_factor * filter_x; - - // Zero padding by 
omitting the areas outside the image. - const bool is_point_inside_image = - (in_x >= 0) && (in_x < input_width) && (in_y >= 0) && - (in_y < input_height); - - if (!is_point_inside_image) { - continue; - } - - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - int32_t input_val = input_data[Offset(input_shape, batch, in_y, - in_x, in_channel)]; - int32_t filter_val = filter_data[Offset( - filter_shape, out_channel, filter_y, filter_x, in_channel)]; - acc += - (filter_val + filter_offset) * (input_val + input_offset); - } - } - } - if (bias_data) { - acc += bias_data[out_channel]; - } - acc = MultiplyByQuantizedMultiplier(acc, output_multiplier, - output_shift); - acc += output_offset; - acc = std::max(acc, output_activation_min); - acc = std::min(acc, output_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = - static_cast(acc); - } - } - } - } -} - -inline void HybridConvPerChannel( - const ConvParams& params, float* scaling_factors_ptr, - const RuntimeShape& input_shape, const int8_t* input_data, - const RuntimeShape& filter_shape, const int8_t* filter_data, - const RuntimeShape& bias_shape, const float* bias_data, - const RuntimeShape& output_shape, float* output_data, - const RuntimeShape& im2col_shape, int8_t* im2col_data, - const float* per_channel_scale, int32_t* input_offset) { - (void)im2col_data; // only used in optimized code. - (void)im2col_shape; // only used in optimized code. - const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int dilation_width_factor = params.dilation_width_factor; - const int dilation_height_factor = params.dilation_height_factor; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - const float output_activation_min = params.float_activation_min; - const float output_activation_max = params.float_activation_max; - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); - const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); - if (bias_data) { - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - } - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int out_channel = 0; out_channel < output_depth; ++out_channel) { - const int in_x_origin = (out_x * stride_width) - pad_width; - const int in_y_origin = (out_y * stride_height) - pad_height; - int32_t acc = 0; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - const int in_x = in_x_origin + dilation_width_factor * filter_x; - const int in_y = - in_y_origin + dilation_height_factor * filter_y; - // If the location is outside the bounds of the input image, - // use zero as a default value. 
- if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) && - (in_y < input_height)) { - int32_t input_val = input_data[Offset( - input_shape, batch, in_y, in_x, in_channel)]; - int32_t filter_val = - filter_data[Offset(filter_shape, out_channel, filter_y, - filter_x, in_channel)]; - acc += filter_val * (input_val - input_offset[batch]); - } - } - } - } - float acc_float = - acc * per_channel_scale[out_channel] * scaling_factors_ptr[batch]; - if (bias_data) { - acc_float += bias_data[out_channel]; - } - output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = - ActivationFunctionWithMinMax(acc_float, output_activation_min, - output_activation_max); - } - } - } - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h deleted file mode 100644 index 0cecb16b..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h +++ /dev/null @@ -1,100 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
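Side note on the float Conv hunk above (a minimal standalone analogue, not the deleted code): stripping away batches, channels, padding, dilation and activation clamping leaves the same accumulate-over-the-filter-window loop nest. A 3x3 input with a 2x2 filter at stride 1 and no padding yields a 2x2 output.

#include <cstdio>

int main() {
  const float input[3][3] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}};
  const float filter[2][2] = {{1, 0}, {0, 1}};
  float output[2][2];
  for (int out_y = 0; out_y < 2; ++out_y) {
    for (int out_x = 0; out_x < 2; ++out_x) {
      float total = 0.f;
      for (int fy = 0; fy < 2; ++fy) {
        for (int fx = 0; fx < 2; ++fx) {
          total += input[out_y + fy][out_x + fx] * filter[fy][fx];
        }
      }
      output[out_y][out_x] = total;
    }
  }
  std::printf("%g %g %g %g\n", output[0][0], output[0][1], output[1][0],
              output[1][1]);  // 6 8 12 14
  return 0;
}

The quantized Conv in the same hunk accumulates (filter_val + filter_offset) * (input_val + input_offset) in int32_t and then rescales with MultiplyByQuantizedMultiplier before clamping, but the loop structure is identical.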
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_ - -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { -namespace reference_ops { - -inline void DepthwiseConv( - const DepthwiseParams& params, const RuntimeShape& input_shape, - const float* input_data, const RuntimeShape& filter_shape, - const float* filter_data, const RuntimeShape& bias_shape, - const float* bias_data, const RuntimeShape& output_shape, - float* output_data) { - const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int dilation_width_factor = params.dilation_width_factor; - const int dilation_height_factor = params.dilation_height_factor; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - const int depth_multiplier = params.depth_multiplier; - const float output_activation_min = params.float_activation_min; - const float output_activation_max = params.float_activation_max; - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int input_depth = input_shape.Dims(3); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier); - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - - for (int b = 0; b < batches; ++b) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int ic = 0; ic < input_depth; ++ic) { - for (int m = 0; m < depth_multiplier; m++) { - const int oc = m + ic * depth_multiplier; - const int in_x_origin = (out_x * stride_width) - pad_width; - const int in_y_origin = (out_y * stride_height) - pad_height; - float total = 0.f; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - const int in_x = in_x_origin + dilation_width_factor * filter_x; - const int in_y = - in_y_origin + dilation_height_factor * filter_y; - // If the location is outside the bounds of the input image, - // use zero as a default value. 
- if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) && - (in_y < input_height)) { - float input_value = - input_data[Offset(input_shape, b, in_y, in_x, ic)]; - float filter_value = filter_data[Offset( - filter_shape, 0, filter_y, filter_x, oc)]; - total += (input_value * filter_value); - } - } - } - float bias_value = 0.0f; - if (bias_data) { - bias_value = bias_data[oc]; - } - output_data[Offset(output_shape, b, out_y, out_x, oc)] = - ActivationFunctionWithMinMax(total + bias_value, - output_activation_min, - output_activation_max); - } - } - } - } - } -} - -} // end namespace reference_ops -} // end namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h deleted file mode 100644 index 20bf83df..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +++ /dev/null @@ -1,297 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_UINT8_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_UINT8_H_ - -#include - -#include "fixedpoint/fixedpoint.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -// Used in tests and template parameters to control which version of depthwise -// convolution is called. Primarily for reference code, and specializations -// forced in tests. -enum class DepthwiseConvImplementation { - // Run all tests against kUseStandardEntry even if also testing another - // kernel, since we need to be sure that the main DepthwiseConv() function in - // optimized_ops.h dispatches to a correctly-executing kernel. - kNone = 0, // The "default" option: use the normal - // DepthwiseConv kernel (entry) function. - kUseGenericKernel, // Forced use of generic kernel. - kUseNeon3x3, // 3x3 kernel that uses NEON when available. - kUseNeon3x3DotProduct, // 3x3 kernel that uses dot-product enabled NEON - // when available. - kUseCModel3x3DotProduct, // 3x3 kernel, reference C model that is intended - // to match overall design NEON code. - kUseUnwound3x3DotProduct, // 3x3 kernel, reference C model with unwound loops - // and some arrays. - kUseIntrinsics3x3DotProduct, // 3x3 kernel using NEON intrinsics. -}; - -// Category of depthwise convolution output rounding. -enum class DepthwiseConvOutputRounding { - kNone = 0, // Invalid: specific method must be specified. - kAwayFromZero, // Original method: exact halves rounded away from zero. 
- kUpward, // Halves towards +infinity: adds 0.5 before truncate. - // This is where a future kNearestEven would be placed. -}; - -// Category of depthwise convolution depth multiplication. -enum class DepthwiseConvDepthMultiplication { - kNoMultiplication = 0, // Depth multiplier = 1. - kUnitInputDepth, // Input depth = 1, output depth = depth multiplier. -}; - -namespace reference_ops { -namespace depthwise_conv { - -template -inline int32_t DepthwiseConvRound(int32_t x, int32_t quantized_multiplier, - int shift) { - TFLITE_DCHECK_NE(output_rounding, DepthwiseConvOutputRounding::kNone); - return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift); -} - -template <> -inline int32_t DepthwiseConvRound( - int32_t x, int32_t quantized_multiplier, int shift) { - return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift); -} - -template <> -inline int32_t DepthwiseConvRound( - int32_t x, int32_t quantized_multiplier, int shift) { - using gemmlowp::SaturatingRoundingDoublingHighMul; - const int left_shift = shift > 0 ? shift : 0; - const int right_shift = shift > 0 ? 0 : -shift; - const int rounding_offset = right_shift > 0 ? 1 << (right_shift - 1) : 0; - return (SaturatingRoundingDoublingHighMul(x * (1 << left_shift), - quantized_multiplier) + - rounding_offset) >> - right_shift; -} - -template -struct DepthwiseConvBasicKernel { - static inline void Run( - const DepthwiseParams& params, const RuntimeShape& input_shape, - const uint8_t* input_data, const RuntimeShape& filter_shape, - const uint8_t* filter_data, const RuntimeShape& bias_shape, - const int32_t* bias_data, const RuntimeShape& output_shape, - uint8_t* output_data) { - const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int dilation_width_factor = params.dilation_width_factor; - const int dilation_height_factor = params.dilation_height_factor; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - const int depth_multiplier = params.depth_multiplier; - const int32_t output_activation_min = params.quantized_activation_min; - const int32_t output_activation_max = params.quantized_activation_max; - const int32_t input_offset = params.input_offset; - const int32_t filter_offset = params.weights_offset; - const int32_t output_offset = params.output_offset; - const int32_t output_multiplier = params.output_multiplier; - const int output_shift = params.output_shift; - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int input_depth = input_shape.Dims(3); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier); - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - - for (int b = 0; b < batches; ++b) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int ic = 0; ic < input_depth; ++ic) { - for (int m = 0; m < 
depth_multiplier; m++) { - const int oc = m + ic * depth_multiplier; - const int in_x_origin = (out_x * stride_width) - pad_width; - const int in_y_origin = (out_y * stride_height) - pad_height; - int32_t acc = 0; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - const int in_x = - in_x_origin + dilation_width_factor * filter_x; - const int in_y = - in_y_origin + dilation_height_factor * filter_y; - // If the location is outside the bounds of the input image, - // use zero as a default value. - if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) && - (in_y < input_height)) { - int32_t input_val = - input_data[Offset(input_shape, b, in_y, in_x, ic)]; - int32_t filter_val = filter_data[Offset( - filter_shape, 0, filter_y, filter_x, oc)]; - acc += (filter_val + filter_offset) * - (input_val + input_offset); - } - } - } - if (bias_data) { - acc += bias_data[oc]; - } - acc = DepthwiseConvRound(acc, output_multiplier, - output_shift); - acc += output_offset; - acc = std::max(acc, output_activation_min); - acc = std::min(acc, output_activation_max); - output_data[Offset(output_shape, b, out_y, out_x, oc)] = - static_cast(acc); - } - } - } - } - } - } - - // TODO(b/148596273): Reconcile reference versions, perhaps with common - // MultiplyByQuantizedMultiplier or DepthwiseConvRound function. - static inline void RunPerChannel( - const DepthwiseParams& params, const RuntimeShape& input_shape, - const int8_t* input_data, const RuntimeShape& filter_shape, - const int8_t* filter_data, const RuntimeShape& bias_shape, - const int32_t* bias_data, const RuntimeShape& output_shape, - int8_t* output_data) { - // Get parameters. - // TODO(b/141565753): Re-introduce ScopedProfilingLabel on Micro. - const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int dilation_width_factor = params.dilation_width_factor; - const int dilation_height_factor = params.dilation_height_factor; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - const int depth_multiplier = params.depth_multiplier; - const int32_t input_offset = params.input_offset; - const int32_t output_offset = params.output_offset; - const int32_t output_activation_min = params.quantized_activation_min; - const int32_t output_activation_max = params.quantized_activation_max; - const int32_t* output_multiplier = params.output_multiplier_per_channel; - const int32_t* output_shift = params.output_shift_per_channel; - - // Check dimensions of the tensors. 
- TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int input_depth = input_shape.Dims(3); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier); - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - for (int m = 0; m < depth_multiplier; ++m) { - const int output_channel = m + in_channel * depth_multiplier; - const int in_x_origin = (out_x * stride_width) - pad_width; - const int in_y_origin = (out_y * stride_height) - pad_height; - int32_t acc = 0; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - const int in_x = - in_x_origin + dilation_width_factor * filter_x; - const int in_y = - in_y_origin + dilation_height_factor * filter_y; - // Zero padding by omitting the areas outside the image. - const bool is_point_inside_image = - (in_x >= 0) && (in_x < input_width) && (in_y >= 0) && - (in_y < input_height); - if (is_point_inside_image) { - int32_t input_val = input_data[Offset( - input_shape, batch, in_y, in_x, in_channel)]; - int32_t filter_val = filter_data[Offset( - filter_shape, 0, filter_y, filter_x, output_channel)]; - // Accumulate with 32 bits accumulator. - // In the nudging process during model quantization, we - // force real value of 0.0 be represented by a quantized - // value. This guarantees that the input_offset is a int8_t, - // even though it is represented using int32_t. int32_t += - // int8_t - // * (int8_t - int8_t) so the highest value we can get from - // each accumulation is [-127, 127] * ([-128, 127] - - // [-128, 127]), which is [-32512, 32512]. log2(32512) - // = 14.98, which means we can accumulate at least 2^16 - // multiplications without overflow. The accumulator is - // applied to a filter so the accumulation logic will hold - // as long as the filter size (filter_y * filter_x * - // in_channel) does not exceed 2^16, which is the case in - // all the models we have seen so far. 
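// Side note, not from the original file: the bound above can be checked
// directly. 2^16 accumulations of at most 32512 each reach
// 65536 * 32512 = 2,130,706,432, which is still below
// INT32_MAX = 2,147,483,647, so the int32_t accumulator cannot overflow
// for filters with up to 2^16 taps.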
- acc += filter_val * (input_val + input_offset); - } - } - } - if (bias_data) { - acc += bias_data[output_channel]; - } - acc = DepthwiseConvRound( - acc, output_multiplier[output_channel], - output_shift[output_channel]); - acc += output_offset; - acc = std::max(acc, output_activation_min); - acc = std::min(acc, output_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, - output_channel)] = static_cast(acc); - } - } - } - } - } - } -}; - -} // namespace depthwise_conv - -inline void DepthwiseConv( - const DepthwiseParams& params, const RuntimeShape& input_shape, - const uint8_t* input_data, const RuntimeShape& filter_shape, - const uint8_t* filter_data, const RuntimeShape& bias_shape, - const int32_t* bias_data, const RuntimeShape& output_shape, - uint8_t* output_data) { - return depthwise_conv::DepthwiseConvBasicKernel< - DepthwiseConvOutputRounding::kAwayFromZero>::Run(params, input_shape, - input_data, filter_shape, - filter_data, bias_shape, - bias_data, output_shape, - output_data); -} - -} // namespace reference_ops -} // end namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_UINT8_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/dequantize.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/dequantize.h deleted file mode 100644 index b90951f9..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/dequantize.h +++ /dev/null @@ -1,78 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_ - -#include - -#include - -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -// Dequantizes into a float without rounding. -template -inline void Dequantize(const tflite::DequantizationParams& op_params, - const RuntimeShape& input_shape, - const InputT* input_data, - const RuntimeShape& output_shape, OutputT* output_data) { - int32_t zero_point = op_params.zero_point; - const double scale = op_params.scale; - const int flat_size = MatchingFlatSize(input_shape, output_shape); - - for (int i = 0; i < flat_size; i++) { - const int32_t val = input_data[i]; - const OutputT result = static_cast(scale * (val - zero_point)); - output_data[i] = result; - } -} - -// Dequantizes per-channel quantized tensor to float. -template -inline void PerChannelDequantize( - const tflite::PerChannelDequantizationParams& op_params, - const RuntimeShape& input_shape, const T* input_data, - const RuntimeShape& output_shape, float* output_data) { - // Ensure flat size is same. 
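The Dequantize template above applies result = scale * (quantized - zero_point) element-wise. A minimal spot check with made-up parameters:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const double scale = 0.1;
      const int32_t zero_point = 5;
      const int8_t quantized[4] = {-128, 0, 5, 127};
      float dequantized[4];
      for (int i = 0; i < 4; ++i) {
        dequantized[i] = static_cast<float>(scale * (quantized[i] - zero_point));
      }
      // Expected: -13.3, -0.5, 0.0, 12.2
      for (int i = 0; i < 4; ++i) {
        printf("%d -> %.1f\n", quantized[i], dequantized[i]);
      }
      return 0;
    }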
- MatchingFlatSize(input_shape, output_shape); - - const int32_t* zero_point = op_params.zero_point; - const float* scale = op_params.scale; - const int32_t quantized_dimension = op_params.quantized_dimension; - const int32_t num_dims = input_shape.DimensionsCount(); - const int32_t* dims_data = input_shape.DimsData(); - std::vector current_dim(num_dims, 0); - - do { - size_t offset = - ReducedOutputOffset(num_dims, reinterpret_cast(dims_data), - current_dim.data(), 0, nullptr); - const int channel = current_dim[quantized_dimension]; - const int32_t val = input_data[offset]; - const float result = - static_cast(scale[channel] * (val - zero_point[channel])); - output_data[offset] = result; - } while (NextIndex(num_dims, reinterpret_cast(dims_data), - current_dim.data())); -} - -} // namespace reference_ops - -} // namespace tflite -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/floor.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/floor.h deleted file mode 100644 index 0693fd42..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/floor.h +++ /dev/null @@ -1,39 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ - -#include - -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -inline void Floor(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - - for (int i = 0; i < flat_size; i++) { - int offset = i; - output_data[offset] = std::floor(input_data[offset]); - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/fully_connected.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/fully_connected.h deleted file mode 100644 index d5ad9d67..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/fully_connected.h +++ /dev/null @@ -1,320 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
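PerChannelDequantize above picks scale[channel] and zero_point[channel] by the coordinate along quantized_dimension. The same computation with plain nested loops instead of the NextIndex/ReducedOutputOffset iteration, quantized along the last dimension and with made-up values:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // A 2x3 slice quantized per channel along the last dimension.
      const int rows = 2, channels = 3;
      const int8_t q[rows * channels] = {10, 10, 10, -10, -10, -10};
      const float scale[channels] = {0.5f, 1.0f, 2.0f};
      const int32_t zero_point[channels] = {0, 2, -2};

      for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < channels; ++c) {
          const float result = scale[c] * (q[r * channels + c] - zero_point[c]);
          printf("%7.1f", result);
        }
        printf("\n");
      }
      // Row 0: 5.0 8.0 24.0   Row 1: -5.0 -12.0 -16.0
      return 0;
    }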
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_ - -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/cppmath.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { -namespace reference_ops { - -inline void FullyConnected( - const FullyConnectedParams& params, const RuntimeShape& input_shape, - const float* input_data, const RuntimeShape& weights_shape, - const float* weights_data, const RuntimeShape& bias_shape, - const float* bias_data, const RuntimeShape& output_shape, - float* output_data) { - const float output_activation_min = params.float_activation_min; - const float output_activation_max = params.float_activation_max; - // TODO(b/62193649): This really should be: - // const int batches = ArraySize(output_dims, 1); - // but the current --variable_batch hack consists in overwriting the 3rd - // dimension with the runtime batch size, as we don't keep track for each - // array of which dimension is the batch dimension in it. - const int output_dims_count = output_shape.DimensionsCount(); - const int weights_dims_count = weights_shape.DimensionsCount(); - const int batches = FlatSizeSkipDim(output_shape, output_dims_count - 1); - const int output_depth = MatchingDim(weights_shape, weights_dims_count - 2, - output_shape, output_dims_count - 1); - const int accum_depth = weights_shape.Dims(weights_dims_count - 1); - for (int b = 0; b < batches; ++b) { - for (int out_c = 0; out_c < output_depth; ++out_c) { - float total = 0.f; - for (int d = 0; d < accum_depth; ++d) { - total += input_data[b * accum_depth + d] * - weights_data[out_c * accum_depth + d]; - } - float bias_value = 0.0f; - if (bias_data) { - bias_value = bias_data[out_c]; - } - output_data[out_c + output_depth * b] = ActivationFunctionWithMinMax( - total + bias_value, output_activation_min, output_activation_max); - } - } -} - -inline void FullyConnected( - const FullyConnectedParams& params, const RuntimeShape& input_shape, - const uint8_t* input_data, const RuntimeShape& filter_shape, - const uint8_t* filter_data, const RuntimeShape& bias_shape, - const int32_t* bias_data, const RuntimeShape& output_shape, - uint8_t* output_data) { - const int32_t input_offset = params.input_offset; - const int32_t filter_offset = params.weights_offset; - const int32_t output_offset = params.output_offset; - const int32_t output_multiplier = params.output_multiplier; - const int output_shift = params.output_shift; - const int32_t output_activation_min = params.quantized_activation_min; - const int32_t output_activation_max = params.quantized_activation_max; - TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2); - TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1); - - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - // TODO(b/62193649): This really should be: - // const int batches = ArraySize(output_dims, 1); - // but the 
current --variable_batch hack consists in overwriting the 3rd - // dimension with the runtime batch size, as we don't keep track for each - // array of which dimension is the batch dimension in it. - const int output_dim_count = output_shape.DimensionsCount(); - const int filter_dim_count = filter_shape.DimensionsCount(); - const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1); - const int output_depth = MatchingDim(filter_shape, filter_dim_count - 2, - output_shape, output_dim_count - 1); - const int accum_depth = filter_shape.Dims(filter_dim_count - 1); - for (int b = 0; b < batches; ++b) { - for (int out_c = 0; out_c < output_depth; ++out_c) { - int32_t acc = 0; - for (int d = 0; d < accum_depth; ++d) { - int32_t input_val = input_data[b * accum_depth + d]; - int32_t filter_val = filter_data[out_c * accum_depth + d]; - acc += (filter_val + filter_offset) * (input_val + input_offset); - } - if (bias_data) { - acc += bias_data[out_c]; - } - acc = MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift); - acc += output_offset; - acc = std::max(acc, output_activation_min); - acc = std::min(acc, output_activation_max); - output_data[out_c + output_depth * b] = static_cast(acc); - } - } -} - -inline void FullyConnected( - const FullyConnectedParams& params, const RuntimeShape& input_shape, - const uint8_t* input_data, const RuntimeShape& filter_shape, - const uint8_t* filter_data, const RuntimeShape& bias_shape, - const int32_t* bias_data, const RuntimeShape& output_shape, - int16_t* output_data) { - const int32_t input_offset = params.input_offset; - const int32_t filter_offset = params.weights_offset; - const int32_t output_offset = params.output_offset; - const int32_t output_multiplier = params.output_multiplier; - const int output_shift = params.output_shift; - const int32_t output_activation_min = params.quantized_activation_min; - const int32_t output_activation_max = params.quantized_activation_max; - - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - TFLITE_DCHECK_EQ(output_offset, 0); - // TODO(b/62193649): This really should be: - // const int batches = ArraySize(output_dims, 1); - // but the current --variable_batch hack consists in overwriting the 3rd - // dimension with the runtime batch size, as we don't keep track for each - // array of which dimension is the batch dimension in it. - const int output_dim_count = output_shape.DimensionsCount(); - const int filter_dim_count = filter_shape.DimensionsCount(); - const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1); - const int output_depth = MatchingDim(filter_shape, filter_dim_count - 2, - output_shape, output_dim_count - 1); - const int accum_depth = filter_shape.Dims(filter_dim_count - 1); - for (int b = 0; b < batches; ++b) { - for (int out_c = 0; out_c < output_depth; ++out_c) { - // Internal accumulation. - // Initialize accumulator with the bias-value. - int32_t accum = bias_data[out_c]; - // Accumulation loop. - for (int d = 0; d < accum_depth; ++d) { - int16_t input_val = input_data[b * accum_depth + d] + input_offset; - int16_t filter_val = - filter_data[out_c * accum_depth + d] + filter_offset; - accum += filter_val * input_val; - } - // Down-scale the final int32_t accumulator to the scale used by our - // (16-bit, typically 3 integer bits) fixed-point format. The quantized - // multiplier and shift here have been pre-computed offline - // (e.g. by toco). 
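The uint8 path above requantizes the accumulator with MultiplyByQuantizedMultiplier, whose output_multiplier and output_shift encode the real rescale factor S_input * S_weights / S_output, precomputed offline. A rough stand-in for that offline decomposition, using double math only to verify the idea (made-up scales; this is not the actual TFLite helper):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const double input_scale = 0.02, weight_scale = 0.005, output_scale = 0.1;
      const double real_multiplier = input_scale * weight_scale / output_scale;  // 0.001

      // Split into a fraction in [0.5, 1) times a power of two, then store the
      // fraction as a Q31 fixed-point integer.
      int shift = 0;
      const double fraction = std::frexp(real_multiplier, &shift);
      const int32_t quantized_multiplier =
          static_cast<int32_t>(std::round(fraction * (1ll << 31)));

      // Applying it to an accumulator (double math here, just to check).
      const int32_t acc = 12345;
      const double rescaled = acc * (quantized_multiplier / std::pow(2.0, 31)) *
                              std::pow(2.0, shift);
      printf("multiplier = %d, shift = %d, rescaled = %.3f, exact = %.3f\n",
             static_cast<int>(quantized_multiplier), shift, rescaled,
             acc * real_multiplier);
      return 0;
    }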
- accum = - MultiplyByQuantizedMultiplier(accum, output_multiplier, output_shift); - // Saturate, cast to int16_t, and store to output array. - accum = std::max(accum, output_activation_min - output_offset); - accum = std::min(accum, output_activation_max - output_offset); - accum += output_offset; - output_data[out_c + output_depth * b] = accum; - } - } -} - -inline void ShuffledFullyConnected( - const FullyConnectedParams& params, const RuntimeShape& input_shape, - const uint8_t* input_data, const RuntimeShape& weights_shape, - const uint8_t* shuffled_weights_data, const RuntimeShape& bias_shape, - const int32_t* bias_data, const RuntimeShape& output_shape, - int16_t* output_data, uint8_t* shuffled_input_workspace_data) { - const int32_t output_multiplier = params.output_multiplier; - const int output_shift = params.output_shift; - const int32_t output_activation_min = params.quantized_activation_min; - const int32_t output_activation_max = params.quantized_activation_max; - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - - TFLITE_DCHECK_GE(input_shape.DimensionsCount(), 1); - TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2); - TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1); - // TODO(b/62193649): This really should be: - // const int batches = ArraySize(output_dims, 1); - // but the current --variable_batch hack consists in overwriting the 3rd - // dimension with the runtime batch size, as we don't keep track for each - // array of which dimension is the batch dimension in it. - const int output_dim_count = output_shape.DimensionsCount(); - const int weights_dim_count = weights_shape.DimensionsCount(); - const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1); - const int output_depth = MatchingDim(weights_shape, weights_dim_count - 2, - output_shape, output_dim_count - 1); - const int accum_depth = weights_shape.Dims(weights_dim_count - 1); - TFLITE_DCHECK((accum_depth % 16) == 0); - TFLITE_DCHECK((output_depth % 4) == 0); - - // Shuffling and xoring of input activations into the workspace buffer - uint8_t* shuffled_input_workspace_ptr = shuffled_input_workspace_data; - if (batches == 1) { - for (int i = 0; i < accum_depth; i++) { - shuffled_input_workspace_data[i] = input_data[i] ^ 0x80; - } - } else if (batches == 4) { - for (int c = 0; c < accum_depth; c += 16) { - for (int b = 0; b < 4; b++) { - const uint8_t* src_data_ptr = input_data + b * accum_depth + c; - for (int j = 0; j < 16; j++) { - uint8_t src_val = *src_data_ptr++; - // Flip the sign bit, so that the kernel will only need to - // reinterpret these uint8_t values as int8_t, getting for free the - // subtraction of the zero_point value 128. - uint8_t dst_val = src_val ^ 0x80; - *shuffled_input_workspace_ptr++ = dst_val; - } - } - } - } else { - TFLITE_DCHECK(false); - return; - } - - // Actual computation - if (batches == 1) { - int16_t* output_ptr = output_data; - // Shuffled weights have had their sign bit (0x80) pre-flipped (xor'd) - // so that just reinterpreting them as int8_t values is equivalent to - // subtracting 128 from them, thus implementing for free the subtraction of - // the zero_point value 128. - const int8_t* shuffled_weights_ptr = - reinterpret_cast(shuffled_weights_data); - // Likewise, we preshuffled and pre-xored the input data above. - const int8_t* shuffled_input_data = - reinterpret_cast(shuffled_input_workspace_data); - for (int c = 0; c < output_depth; c += 4) { - // Internal accumulation. - // Initialize accumulator with the bias-value. 
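The shuffling step above relies on the identity that XOR-ing the sign bit (v ^ 0x80) and reinterpreting the result as int8 yields v - 128, which is why reading the shuffled bytes as int8 subtracts the zero point of 128 for free. A short exhaustive check of that identity:

    #include <cstdint>
    #include <cstdio>

    int main() {
      for (int v = 0; v < 256; ++v) {
        const uint8_t flipped = static_cast<uint8_t>(v) ^ 0x80;
        const int8_t reinterpreted = static_cast<int8_t>(flipped);
        if (static_cast<int>(reinterpreted) != v - 128) {
          printf("mismatch at %d\n", v);
          return 1;
        }
      }
      printf("v ^ 0x80 read as int8 equals v - 128 for all 256 values\n");
      return 0;
    }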
- int32_t accum[4] = {0}; - // Accumulation loop. - for (int d = 0; d < accum_depth; d += 16) { - for (int i = 0; i < 4; i++) { - for (int j = 0; j < 16; j++) { - int8_t input_val = shuffled_input_data[d + j]; - int8_t weights_val = *shuffled_weights_ptr++; - accum[i] += weights_val * input_val; - } - } - } - for (int i = 0; i < 4; i++) { - // Add bias value - int32_t acc = accum[i] + bias_data[c + i]; - // Down-scale the final int32_t accumulator to the scale used by our - // (16-bit, typically 3 integer bits) fixed-point format. The quantized - // multiplier and shift here have been pre-computed offline - // (e.g. by toco). - acc = - MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift); - // Saturate, cast to int16_t, and store to output array. - acc = std::max(acc, output_activation_min); - acc = std::min(acc, output_activation_max); - output_ptr[c + i] = acc; - } - } - } else if (batches == 4) { - int16_t* output_ptr = output_data; - // Shuffled weights have had their sign bit (0x80) pre-flipped (xor'd) - // so that just reinterpreting them as int8_t values is equivalent to - // subtracting 128 from them, thus implementing for free the subtraction of - // the zero_point value 128. - const int8_t* shuffled_weights_ptr = - reinterpret_cast(shuffled_weights_data); - // Likewise, we preshuffled and pre-xored the input data above. - const int8_t* shuffled_input_data = - reinterpret_cast(shuffled_input_workspace_data); - for (int c = 0; c < output_depth; c += 4) { - const int8_t* shuffled_input_ptr = shuffled_input_data; - // Accumulation loop. - // Internal accumulation. - // Initialize accumulator with the bias-value. - int32_t accum[4][4]; - for (int i = 0; i < 4; i++) { - for (int b = 0; b < 4; b++) { - accum[i][b] = 0; - } - } - for (int d = 0; d < accum_depth; d += 16) { - for (int i = 0; i < 4; i++) { - for (int b = 0; b < 4; b++) { - for (int j = 0; j < 16; j++) { - int8_t input_val = shuffled_input_ptr[16 * b + j]; - int8_t weights_val = shuffled_weights_ptr[16 * i + j]; - accum[i][b] += weights_val * input_val; - } - } - } - shuffled_input_ptr += 64; - shuffled_weights_ptr += 64; - } - for (int i = 0; i < 4; i++) { - for (int b = 0; b < 4; b++) { - // Add bias value - int32_t acc = accum[i][b] + bias_data[c + i]; - // Down-scale the final int32_t accumulator to the scale used by our - // (16-bit, typically 3 integer bits) fixed-point format. The - // quantized multiplier and shift here have been pre-computed offline - // (e.g. by toco). - acc = MultiplyByQuantizedMultiplier(acc, output_multiplier, - output_shift); - // Saturate, cast to int16_t, and store to output array. - acc = std::max(acc, output_activation_min); - acc = std::min(acc, output_activation_max); - output_ptr[b * output_depth + c + i] = acc; - } - } - } - } else { - TFLITE_DCHECK(false); - return; - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/hard_swish.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/hard_swish.h deleted file mode 100644 index cda1b5cf..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/hard_swish.h +++ /dev/null @@ -1,166 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ACTIVATIONS_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ACTIVATIONS_H_ - -#include "ruy/profiler/instrumentation.h" // from @ruy -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { -namespace reference_ops { - -inline int16_t SaturatingLeftShift(int16_t value, int amount) { - int32_t result = static_cast(value) * (1 << amount); - result = std::min(result, std::numeric_limits::max()); - result = std::max(result, std::numeric_limits::min()); - return result; -} - -// Similar to ARM instruction SQDMULH. -// Similar to gemmlowp::SaturatingRoundingDoublingHighMul except -// rounding to zero instead of to nearest (SQRDMULH). -inline std::int16_t SaturatingDoublingHighMul(std::int16_t a, std::int16_t b) { - bool overflow = a == b && a == std::numeric_limits::min(); - std::int32_t a_32(a); - std::int32_t b_32(b); - std::int32_t ab_32 = a_32 * b_32; - std::int16_t ab_x2_high16 = static_cast((ab_32) / (1 << 15)); - return overflow ? std::numeric_limits::max() : ab_x2_high16; -} - -template -inline void HardSwish(const RuntimeShape& input_shape, const T* input_data, - const RuntimeShape& output_shape, T* output_data) { - ruy::profiler::ScopeLabel label("ReferenceHardSwish/Float"); - auto matching_size = MatchingFlatSize(input_shape, output_shape); - const T* in_end = input_data + matching_size; - for (; input_data < in_end; input_data++, output_data++) { - const float in = *input_data; - *output_data = - in * std::min(static_cast(6), std::max(static_cast(0), in + 3)) / - 6; - } -} - -template -inline void HardSwish(const HardSwishParams& params, - const RuntimeShape& input_shape, const T* input_data, - const RuntimeShape& output_shape, T* output_data) { - ruy::profiler::ScopeLabel label("ReferenceHardSwish/Quantized"); - - const int flat_size = MatchingFlatSize(input_shape, output_shape); - - for (int i = 0; i < flat_size; i++) { - const int16_t input_value = input_data[i] - params.input_zero_point; - // Left-shift as much as we can without overflow/saturation to put - // significant bits in the high bits of our 16-bit fixedpoint values, so - // that fixed-point approximate computations below are as accurate as - // possible. - const int16_t input_value_on_hires_input_scale = input_value * (1 << 7); - // Compute the input value on essentially the output scale, just not - // right-shifted yet. This is the value that we'll use in the (x >= +3) - // case, and that in the general case we'll multiply against the "relu-ish" - // fixed-point multiplier in [0, 1]. - const int16_t input_value_on_preshift_output_scale = - gemmlowp::SaturatingRoundingDoublingHighMul( - input_value_on_hires_input_scale, - params.output_multiplier_fixedpoint_int16); - // Now compute the "relu-ish multiplier". 
In the (-3 <= x <= +3) case, that - // is just an affine rescaling of x from [-3, 3] to [0, 1]. In the general - // case, it is just that plus saturation at the boundaries of [-3, 3]. - // First, we rescale from [-3, 3] to [-1, 1], saturating. - // That is done by rescaling the input value with a fixed-point multiplier - // (reluish_multiplier_fixedpoint) and bit-shift such that we represent - // that input value on the scale where the real value 3.0f is represented - // by the quantized value 32768. (+32768 is actually not representable as - // int16_t, so this saturates at +32767, and that is seen empirically to be - // a negligible contribution to numerical error/bias). - // - // This code is careful to correctly implement any magnitude of multiplier, - // involving either a right shift or a left shift, with correct saturation - // behavior in the left-shift case. This forces this code to be more - // complicated, but is necessary for real applications: a partially - // trained quantized MobileNet v3-small model that motivated this code - // exhibits some large [min, max] range boundaries, of the order of - // magnitude of 10 or 100 depending on layers. - // - // The next few lines are basically just an ordinary - // MultiplyByQuantizedMultiplier, except that we are more careful here - // about the fine details of saturation when left-shifting, because here - // overflow in left-shift is a common case, not an anomaly as - // MultiplyByQuantizedMultiplier assumes. - int16_t reluish_value = input_value_on_hires_input_scale; - // Shift left, saturating, as much as we can while ensuring that this - // saturation will not contribute to the result. That is, left shift amount - // reduced by 1. - if (params.reluish_multiplier_exponent > 0) { - reluish_value = SaturatingLeftShift( - reluish_value, params.reluish_multiplier_exponent - 1); - } - // Apply the fixed-point multiplier, dividing the value by a divisor - // ranging in [1, 2]. - reluish_value = gemmlowp::SaturatingRoundingDoublingHighMul( - reluish_value, params.reluish_multiplier_fixedpoint_int16); - // Apply the last bit of left-shift. Thus, in the left-shifting case, if - // any saturation affects the result, it is happening here --- any - // saturation having occurred above is overwritten here, not affecting the - // result. - if (params.reluish_multiplier_exponent > 0) { - reluish_value = SaturatingLeftShift(reluish_value, 1); - } - // Shift right, in the right-shifting case. - if (params.reluish_multiplier_exponent < 0) { - reluish_value = gemmlowp::RoundingDivideByPOT( - reluish_value, -params.reluish_multiplier_exponent); - } - // At this point we have rescaled the value into a 16bit fixedpoint - // reluish_value in [-1, 1]. - // We now convert that to a 16bit fixedpoint value in [0, 1]. - reluish_value = (reluish_value + (1 << 15)) >> 1; - // Use of SaturatingDoublingHighMul here is important to cancel the biases - // from the above SaturatingRoundingDoublingHighMul. 
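The float overload of HardSwish above computes x * min(6, max(0, x + 3)) / 6; the quantized overload reproduces the same curve in 16-bit fixed point. A small spot check of the float formula (values made up):

    #include <algorithm>
    #include <cstdio>

    // Same formula as the float HardSwish above: y = x * relu6(x + 3) / 6.
    static float HardSwishRef(float x) {
      return x * std::min(6.0f, std::max(0.0f, x + 3.0f)) / 6.0f;
    }

    int main() {
      const float xs[] = {-4.0f, -3.0f, -1.0f, 0.0f, 1.0f, 3.0f, 5.0f};
      for (float x : xs) {
        printf("hard_swish(%+.1f) = %+.4f\n", x, HardSwishRef(x));
      }
      // For x <= -3 the result is 0, for x >= 3 it is x itself, and in between
      // it is a smooth ramp (e.g. -1 -> -0.3333, 1 -> 0.6667).
      return 0;
    }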
- // - // On a partially trained MobileNet-v3-small, - // - // | bias on | ImageNet - // | quantized | Top-1 - // Operation used here | values | accuracy (50k) - // --------------------------------------+------------+----------- - // SaturatingDoublingHighMul | -0.0024 | 58.920 - // SaturatingRoundingDoublingHighMul | -0.0067 | 58.064 - // - // In activations_test, this is covered by this testcase: - // QuantizedActivationsOpTest.HardSwishBias - // - const int16_t preshift_output_value = SaturatingDoublingHighMul( - reluish_value, input_value_on_preshift_output_scale); - // We were so far operating on the pre-shift output scale. Now we finally - // apply that output shift, arriving at the final output scale. - int16_t output_value = gemmlowp::RoundingDivideByPOT( - preshift_output_value, -params.output_multiplier_exponent); - output_value += params.output_zero_point; - output_value = - std::min(output_value, std::numeric_limits::max()); - output_value = - std::max(output_value, std::numeric_limits::min()); - output_data[i] = output_value; - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/add.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/add.h deleted file mode 100644 index 10bee904..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +++ /dev/null @@ -1,144 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_ - -#include - -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { -namespace reference_integer_ops { - -inline void CheckArithmeticParams(const ArithmeticParams& params) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - // Input offset is negative input zero point. Activation tensors are - // asymmetric quantized so they span the full int8 range. 
- TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits::min()); - TFLITE_DCHECK_GE(-params.input2_offset, std::numeric_limits::min()); - TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits::max()); - TFLITE_DCHECK_LE(-params.input2_offset, std::numeric_limits::max()); -} - -inline void ElementWise( - int size, const ArithmeticParams& params, const int8_t* input1_data, - const int8_t* input2_data, int8_t* output_data, - void (*check_arithmetic_params)(const ArithmeticParams&), - int8_t (*binary_func)(int8_t, int8_t, const ArithmeticParams&)) { - CheckArithmeticParams(params); - for (int i = 0; i < size; ++i) { - output_data[i] = binary_func(input1_data[i], input2_data[i], params); - } -} - -inline void BroadcastBinaryFunction4DSlow( - const ArithmeticParams& params, const RuntimeShape& input1_shape, - const int8_t* input1_data, const RuntimeShape& input2_shape, - const int8_t* input2_data, const RuntimeShape& output_shape, - int8_t* output_data, - void (*check_arithmetic_params)(const ArithmeticParams&), - int8_t (*binary_func)(int8_t, int8_t, const ArithmeticParams&)) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. - for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - output_data[Offset(extended_output_shape, b, y, x, c)] = binary_func( - input1_data[SubscriptToIndex(desc1, b, y, x, c)], - input2_data[SubscriptToIndex(desc2, b, y, x, c)], params); - } - } - } - } -} - -inline int8_t AddFunc(int8_t x, int8_t y, const ArithmeticParams& params) { - const int32_t input1_val = params.input1_offset + x; - const int32_t input2_val = params.input2_offset + y; - const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, params.input2_shift); - const int32_t raw_sum = scaled_input1_val + scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sum, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - return static_cast(clamped_output); -} - -// Element-wise add that can often be used for inner loop of broadcast add as -// well as the non-broadcast add. 
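AddFunc above performs the whole add in fixed point: offsets are applied, both inputs are shifted up by left_shift for headroom and brought to a common scale by their multipliers, the sum is rescaled to the output scale, and the result is offset and clamped. A float mock-up of the same arithmetic with made-up parameters (this deliberately uses floating point instead of the integer helpers, to show what is being computed):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Made-up quantization parameters for two int8 inputs and the output.
      const float s1 = 0.05f, s2 = 0.1f, s_out = 0.2f;
      const int32_t z1 = -10, z2 = 4, z_out = 0;
      const int8_t q1 = 30, q2 = 16;  // represent 2.0 and 1.2 respectively

      const float real1 = s1 * (q1 - z1);    // 0.05 * 40 = 2.0
      const float real2 = s2 * (q2 - z2);    // 0.10 * 12 = 1.2
      const float real_sum = real1 + real2;  // 3.2
      const int q_out = static_cast<int>(std::round(real_sum / s_out)) + z_out;
      const int clamped = std::min(127, std::max(-128, q_out));
      printf("real sum = %.2f -> int8 output = %d (dequantizes to %.2f)\n",
             real_sum, clamped, s_out * (clamped - z_out));
      return 0;
    }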
-inline void AddElementwise(int size, const ArithmeticParams& params, - const int8_t* input1_data, const int8_t* input2_data, - int8_t* output_data) { - ElementWise(size, params, input1_data, input2_data, output_data, - CheckArithmeticParams, AddFunc); -} - -inline void Add(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const int8_t* input1_data, - const RuntimeShape& input2_shape, const int8_t* input2_data, - const RuntimeShape& output_shape, int8_t* output_data) { - CheckArithmeticParams(params); - - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - - AddElementwise(flat_size, params, input1_data, input2_data, output_data); -} - -inline void BroadcastAdd4DSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int8_t* input1_data, - const RuntimeShape& input2_shape, - const int8_t* input2_data, - const RuntimeShape& output_shape, - int8_t* output_data) { - BroadcastBinaryFunction4DSlow(params, input1_shape, input1_data, input2_shape, - input2_data, output_shape, output_data, - CheckArithmeticParams, AddFunc); -} - -} // namespace reference_integer_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h deleted file mode 100644 index 3a4164d3..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +++ /dev/null @@ -1,221 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_CONV_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_CONV_H_ - -#include "tensorflow/lite/kernels/internal/common.h" - -namespace tflite { -namespace reference_integer_ops { - -// Fixed-point per-channel-quantization convolution reference kernel. -inline void ConvPerChannel( - const ConvParams& params, const int32_t* output_multiplier, - const int32_t* output_shift, const RuntimeShape& input_shape, - const int8_t* input_data, const RuntimeShape& filter_shape, - const int8_t* filter_data, const RuntimeShape& bias_shape, - const int32_t* bias_data, const RuntimeShape& output_shape, - int8_t* output_data) { - // Get parameters. 
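BroadcastBinaryFunction4DSlow above walks the 4D output shape and, for any input dimension of extent 1, reuses index 0 of that input while the output index advances. A minimal plain-loop illustration with made-up shapes, using ordinary integer addition in place of the quantized AddFunc:

    #include <cstdio>

    int main() {
      // input1 has shape (1, 2, 1, 3); input2 has shape (1, 1, 1, 3).
      // The broadcast output has shape (1, 2, 1, 3).
      const int in1[2][3] = {{1, 2, 3}, {4, 5, 6}};
      const int in2[3] = {10, 20, 30};
      for (int y = 0; y < 2; ++y) {
        for (int c = 0; c < 3; ++c) {
          // input2 has extent 1 along y, so its y index collapses to 0.
          printf("%4d", in1[y][c] + in2[c]);
        }
        printf("\n");
      }
      // Prints: 11 22 33 / 14 25 36
      return 0;
    }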
- const int32_t input_offset = params.input_offset; // r = s(q - Z) - const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int dilation_width_factor = params.dilation_width_factor; - const int dilation_height_factor = params.dilation_height_factor; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - const int32_t output_offset = params.output_offset; - - // Set min and max value of the output. - const int32_t output_activation_min = params.quantized_activation_min; - const int32_t output_activation_max = params.quantized_activation_max; - - // Consistency check. - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); - const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); - if (bias_data) { - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - } - - // Check dimensions of the tensors. - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - const int in_y_origin = (out_y * stride_height) - pad_height; - for (int out_x = 0; out_x < output_width; ++out_x) { - const int in_x_origin = (out_x * stride_width) - pad_width; - for (int out_channel = 0; out_channel < output_depth; ++out_channel) { - int32_t acc = 0; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - const int in_y = in_y_origin + dilation_height_factor * filter_y; - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - const int in_x = in_x_origin + dilation_width_factor * filter_x; - - // Zero padding by omitting the areas outside the image. - const bool is_point_inside_image = - (in_x >= 0) && (in_x < input_width) && (in_y >= 0) && - (in_y < input_height); - - if (!is_point_inside_image) { - continue; - } - - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - int32_t input_val = input_data[Offset(input_shape, batch, in_y, - in_x, in_channel)]; - int32_t filter_val = filter_data[Offset( - filter_shape, out_channel, filter_y, filter_x, in_channel)]; - // Accumulate with 32 bits accumulator. - // In the nudging process during model quantization, we force - // real value of 0.0 be represented by a quantized value. This - // guarantees that the input_offset is a int8_t, even though - // it is represented using int32_t. int32_t += int8_t * - // (int8_t - int8_t) so the highest value we can get from each - // accumulation is [-127, 127] * ([-128, 127] - - // [-128, 127]), which is [-32512, 32512]. log2(32512) - // = 14.98, which means we can accumulate at least 2^16 - // multiplications without overflow. The accumulator is - // applied to a filter so the accumulation logic will hold as - // long as the filter size (filter_y * filter_x * in_channel) - // does not exceed 2^16, which is the case in all the models - // we have seen so far. 
- // TODO(b/174275578): Add a check to make sure the - // accumulator depth is smaller than 2^16. - acc += filter_val * (input_val + input_offset); - } - } - } - - if (bias_data) { - acc += bias_data[out_channel]; - } - acc = MultiplyByQuantizedMultiplier( - acc, output_multiplier[out_channel], output_shift[out_channel]); - acc += output_offset; - acc = std::max(acc, output_activation_min); - acc = std::min(acc, output_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = - static_cast(acc); - } - } - } - } -} - -// Fixed-point per-channel-quantization convolution reference kernel. -// 16-bit data and 8-bit filter -inline void ConvPerChannel( - const ConvParams& params, const int32_t* output_multiplier, - const int32_t* output_shift, const RuntimeShape& input_shape, - const int16_t* input_data, const RuntimeShape& filter_shape, - const int8_t* filter_data, const RuntimeShape& bias_shape, - const std::int64_t* bias_data, const RuntimeShape& output_shape, - int16_t* output_data) { - // Get parameters. - const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int dilation_width_factor = params.dilation_width_factor; - const int dilation_height_factor = params.dilation_height_factor; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - - // Set min and max value of the output. - const int32_t output_activation_min = params.quantized_activation_min; - const int32_t output_activation_max = params.quantized_activation_max; - - // Consistency check. - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); - const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); - if (bias_data) { - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - } - - // Check dimensions of the tensors. - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - const int in_y_origin = (out_y * stride_height) - pad_height; - for (int out_x = 0; out_x < output_width; ++out_x) { - const int in_x_origin = (out_x * stride_width) - pad_width; - for (int out_channel = 0; out_channel < output_depth; ++out_channel) { - std::int64_t acc = 0; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - const int in_y = in_y_origin + dilation_height_factor * filter_y; - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - const int in_x = in_x_origin + dilation_width_factor * filter_x; - - // Zero padding by omitting the areas outside the image. 
- const bool is_point_inside_image = - (in_x >= 0) && (in_x < input_width) && (in_y >= 0) && - (in_y < input_height); - - if (!is_point_inside_image) { - continue; - } - - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - int32_t input_val = input_data[Offset(input_shape, batch, in_y, - in_x, in_channel)]; - int32_t filter_val = filter_data[Offset( - filter_shape, out_channel, filter_y, filter_x, in_channel)]; - // Accumulate with 64 bits accumulator. - // int64_t += int8_t * int16_t so the highest value we can - // get from each accumulation is [-127, 127] * ([-32768, - // 32767] - - // [-32768, 32767]), which is [-8322945, 8322945]. - // log2(8322945) = 22.99. - acc += filter_val * input_val; - } - } - } - if (bias_data) { - acc += bias_data[out_channel]; - } - int32_t scaled_acc = MultiplyByQuantizedMultiplier( - acc, output_multiplier[out_channel], output_shift[out_channel]); - scaled_acc = std::max(scaled_acc, output_activation_min); - scaled_acc = std::min(scaled_acc, output_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = - static_cast(scaled_acc); - } - } - } - } -} - -} // namespace reference_integer_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_CONV_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h deleted file mode 100644 index f0ca09c7..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +++ /dev/null @@ -1,289 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEPTHWISE_CONV_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEPTHWISE_CONV_H_ - -#include "tensorflow/lite/kernels/internal/common.h" - -namespace tflite { -namespace reference_integer_ops { -inline void DepthwiseConvPerChannel( - const DepthwiseParams& params, const int32_t* output_multiplier, - const int32_t* output_shift, const RuntimeShape& input_shape, - const int8_t* input_data, const RuntimeShape& filter_shape, - const int8_t* filter_data, const RuntimeShape& bias_shape, - const int32_t* bias_data, const RuntimeShape& output_shape, - int8_t* output_data) { - // Get parameters. - // TODO(b/141565753): Re-introduce ScopedProfilingLabel on Micro. 
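A quick arithmetic check of the 16x8 accumulator comment above: the per-product bound of 8322945 quoted there is about 2^23, so even 2^16 accumulations stay near 2^39, far inside a 64-bit accumulator (the 8322945 figure is the comment's own; the rest is plain arithmetic):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const int64_t max_product = 8322945;                  // quoted bound, = 127 * 65535
      const int64_t worst_acc = max_product * (1LL << 16);  // 2^16 accumulations
      printf("max product log2 = %.2f, worst-case accumulator log2 = %.2f "
             "(int64 offers 63)\n",
             std::log2(static_cast<double>(max_product)),
             std::log2(static_cast<double>(worst_acc)));
      return 0;
    }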
- const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int dilation_width_factor = params.dilation_width_factor; - const int dilation_height_factor = params.dilation_height_factor; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - const int depth_multiplier = params.depth_multiplier; - const int32_t input_offset = params.input_offset; - const int32_t output_offset = params.output_offset; - const int32_t output_activation_min = params.quantized_activation_min; - const int32_t output_activation_max = params.quantized_activation_max; - - // Check dimensions of the tensors. - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int input_depth = input_shape.Dims(3); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier); - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - for (int m = 0; m < depth_multiplier; ++m) { - const int output_channel = m + in_channel * depth_multiplier; - const int in_x_origin = (out_x * stride_width) - pad_width; - const int in_y_origin = (out_y * stride_height) - pad_height; - int32_t acc = 0; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - const int in_x = in_x_origin + dilation_width_factor * filter_x; - const int in_y = - in_y_origin + dilation_height_factor * filter_y; - // Zero padding by omitting the areas outside the image. - const bool is_point_inside_image = - (in_x >= 0) && (in_x < input_width) && (in_y >= 0) && - (in_y < input_height); - if (is_point_inside_image) { - int32_t input_val = input_data[Offset( - input_shape, batch, in_y, in_x, in_channel)]; - int32_t filter_val = filter_data[Offset( - filter_shape, 0, filter_y, filter_x, output_channel)]; - // Accumulate with 32 bits accumulator. - // In the nudging process during model quantization, we force - // real value of 0.0 be represented by a quantized value. This - // guarantees that the input_offset is a int8_t, even though - // it is represented using int32_t. int32_t += int8_t * - // (int8_t - int8_t) so the highest value we can get from each - // accumulation is [-127, 127] * ([-128, 127] - - // [-128, 127]), which is [-32512, 32512]. log2(32512) - // = 14.98, which means we can accumulate at least 2^16 - // multiplications without overflow. The accumulator is - // applied to a filter so the accumulation logic will hold as - // long as the filter size (filter_y * filter_x * in_channel) - // does not exceed 2^16, which is the case in all the models - // we have seen so far. 
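All the depthwise kernels above index output channels as output_channel = m + in_channel * depth_multiplier, so each input channel expands into depth_multiplier consecutive output channels. A tiny illustration of that mapping (sizes made up):

    #include <cstdio>

    int main() {
      const int input_depth = 3;
      const int depth_multiplier = 2;  // output_depth = 6
      for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
        for (int m = 0; m < depth_multiplier; ++m) {
          const int output_channel = m + in_channel * depth_multiplier;
          printf("in_channel %d, m %d -> output_channel %d\n",
                 in_channel, m, output_channel);
        }
      }
      // Each of the 6 output channels 0..5 is produced exactly once.
      return 0;
    }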
- // TODO(b/174275578): Add a check to make sure the - // accumulator depth is smaller than 2^16. - acc += filter_val * (input_val + input_offset); - } - } - } - if (bias_data) { - acc += bias_data[output_channel]; - } - acc = MultiplyByQuantizedMultiplier( - acc, output_multiplier[output_channel], - output_shift[output_channel]); - acc += output_offset; - acc = std::max(acc, output_activation_min); - acc = std::min(acc, output_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, - output_channel)] = static_cast(acc); - } - } - } - } - } -} - -inline void DepthwiseConvPerChannel( - const DepthwiseParams& params, const int32_t* output_multiplier, - const int32_t* output_shift, const RuntimeShape& input_shape, - const int16_t* input_data, const RuntimeShape& filter_shape, - const int8_t* filter_data, const RuntimeShape& bias_shape, - const std::int64_t* bias_data, const RuntimeShape& output_shape, - int16_t* output_data) { - // Get parameters. - const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int dilation_width_factor = params.dilation_width_factor; - const int dilation_height_factor = params.dilation_height_factor; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - const int depth_multiplier = params.depth_multiplier; - const int32_t output_activation_min = params.quantized_activation_min; - const int32_t output_activation_max = params.quantized_activation_max; - - // Check dimensions of the tensors. - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int input_depth = input_shape.Dims(3); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier); - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - for (int m = 0; m < depth_multiplier; ++m) { - const int output_channel = m + in_channel * depth_multiplier; - const int in_x_origin = (out_x * stride_width) - pad_width; - const int in_y_origin = (out_y * stride_height) - pad_height; - std::int64_t acc = 0; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - const int in_x = in_x_origin + dilation_width_factor * filter_x; - const int in_y = - in_y_origin + dilation_height_factor * filter_y; - // Zero padding by omitting the areas outside the image. 
- const bool is_point_inside_image = - (in_x >= 0) && (in_x < input_width) && (in_y >= 0) && - (in_y < input_height); - if (is_point_inside_image) { - int32_t input_val = input_data[Offset( - input_shape, batch, in_y, in_x, in_channel)]; - int32_t filter_val = filter_data[Offset( - filter_shape, 0, filter_y, filter_x, output_channel)]; - // Accumulate with 64 bits accumulator. - // We assume maximum of 2^16 accumulations as with the 8-bit - // case so actually the value in the accumulator should not - // exceed 40 bits - acc += static_cast(filter_val) * - static_cast(input_val); - } - } - } - if (bias_data) { - acc += bias_data[output_channel]; - } - int32_t scaled_acc = MultiplyByQuantizedMultiplier( - acc, output_multiplier[output_channel], - output_shift[output_channel]); - scaled_acc = std::max(scaled_acc, output_activation_min); - scaled_acc = std::min(scaled_acc, output_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, - output_channel)] = - static_cast(scaled_acc); - } - } - } - } - } -} - -inline void DepthwiseConvHybridPerChannel( - const DepthwiseParams& params, float* scaling_factors_ptr, - const RuntimeShape& input_shape, const int8_t* input_data, - const RuntimeShape& filter_shape, const int8_t* filter_data, - const RuntimeShape& bias_shape, const float* bias_data, - const RuntimeShape& output_shape, float* output_data, - const float* per_channel_scale, int32_t* input_offset) { - const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int dilation_width_factor = params.dilation_width_factor; - const int dilation_height_factor = params.dilation_height_factor; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - const int depth_multiplier = params.depth_multiplier; - const float output_activation_min = params.float_activation_min; - const float output_activation_max = params.float_activation_max; - // Check dimensions of the tensors. 
- TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int input_depth = input_shape.Dims(3); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int bias_depth = bias_shape.FlatSize(); - TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier); - TFLITE_DCHECK_EQ(bias_depth, output_depth); - - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - for (int m = 0; m < depth_multiplier; ++m) { - const int output_channel = m + in_channel * depth_multiplier; - const int in_x_origin = (out_x * stride_width) - pad_width; - const int in_y_origin = (out_y * stride_height) - pad_height; - int32_t acc = 0; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - const int in_x = in_x_origin + dilation_width_factor * filter_x; - const int in_y = - in_y_origin + dilation_height_factor * filter_y; - // Zero padding by omitting the areas outside the image. - const bool is_point_inside_image = - (in_x >= 0) && (in_x < input_width) && (in_y >= 0) && - (in_y < input_height); - if (is_point_inside_image) { - int32_t input_val = input_data[Offset( - input_shape, batch, in_y, in_x, in_channel)]; - int32_t filter_val = filter_data[Offset( - filter_shape, 0, filter_y, filter_x, output_channel)]; - acc += filter_val * (input_val - input_offset[batch]); - } - } - } - float acc_float = static_cast(acc); - acc_float *= - per_channel_scale[output_channel] * scaling_factors_ptr[batch]; - if (bias_data && output_channel < bias_depth) { - acc_float += bias_data[output_channel]; - } - output_data[Offset(output_shape, batch, out_y, out_x, - output_channel)] = - ActivationFunctionWithMinMax(acc_float, output_activation_min, - output_activation_max); - } - } - } - } - } -} - -} // namespace reference_integer_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEPTHWISE_CONV_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h deleted file mode 100644 index 2bc3e794..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +++ /dev/null @@ -1,108 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
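DepthwiseConvHybridPerChannel above keeps the MAC loop in int8 but converts the accumulator back to float with per_channel_scale[output_channel] * scaling_factors_ptr[batch] before adding the float bias. A single-value mock-up of that dequantization step with made-up scales:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Pretend the real input value 3.0 was dynamically quantized with a
      // per-batch scaling factor of 0.1 (q = 30, zero point 0), and the real
      // weight 0.5 was quantized per channel with scale 0.25 (q = 2).
      const int32_t q_input = 30, q_weight = 2, input_offset = 0;
      const float scaling_factor = 0.1f;      // per-batch input scale
      const float per_channel_scale = 0.25f;  // per-output-channel weight scale
      const float bias = 1.0f;

      const int32_t acc = q_weight * (q_input - input_offset);
      const float result = acc * per_channel_scale * scaling_factor + bias;
      printf("acc = %d, float result = %.2f (expected 3.0 * 0.5 + 1.0 = 2.5)\n",
             static_cast<int>(acc), result);
      return 0;
    }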
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_ - -#include "tensorflow/lite/kernels/internal/common.h" - -namespace tflite { -namespace reference_integer_ops { - -inline void FullyConnected( - const FullyConnectedParams& params, const RuntimeShape& input_shape, - const int8_t* input_data, const RuntimeShape& filter_shape, - const int8_t* filter_data, const RuntimeShape& bias_shape, - const int32_t* bias_data, const RuntimeShape& output_shape, - int8_t* output_data) { - const int32_t input_offset = params.input_offset; - const int32_t filter_offset = params.weights_offset; - const int32_t output_offset = params.output_offset; - const int32_t output_multiplier = params.output_multiplier; - const int output_shift = params.output_shift; - const int32_t output_activation_min = params.quantized_activation_min; - const int32_t output_activation_max = params.quantized_activation_max; - TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2); - - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - const int filter_dim_count = filter_shape.DimensionsCount(); - const int batches = output_shape.Dims(0); - const int output_depth = output_shape.Dims(1); - TFLITE_DCHECK_LE(output_depth, filter_shape.Dims(filter_dim_count - 2)); - const int accum_depth = filter_shape.Dims(filter_dim_count - 1); - for (int b = 0; b < batches; ++b) { - for (int out_c = 0; out_c < output_depth; ++out_c) { - int32_t acc = 0; - for (int d = 0; d < accum_depth; ++d) { - int32_t input_val = input_data[b * accum_depth + d]; - int32_t filter_val = filter_data[out_c * accum_depth + d]; - acc += (filter_val + filter_offset) * (input_val + input_offset); - } - if (bias_data) { - acc += bias_data[out_c]; - } - acc = MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift); - acc += output_offset; - acc = std::max(acc, output_activation_min); - acc = std::min(acc, output_activation_max); - output_data[out_c + output_depth * b] = static_cast(acc); - } - } -} - -inline void FullyConnected( - const FullyConnectedParams& params, const RuntimeShape& input_shape, - const int16_t* input_data, const RuntimeShape& filter_shape, - const int8_t* filter_data, const RuntimeShape& bias_shape, - const int64_t* bias_data, const RuntimeShape& output_shape, - int16_t* output_data) { - const int32_t filter_offset = params.weights_offset; - const int32_t output_multiplier = params.output_multiplier; - const int output_shift = params.output_shift; - const int32_t output_activation_min = params.quantized_activation_min; - const int32_t output_activation_max = params.quantized_activation_max; - TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2); - - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - const int filter_dim_count = filter_shape.DimensionsCount(); - const int batches = output_shape.Dims(0); - const int 
output_depth = output_shape.Dims(1); - TFLITE_DCHECK_LE(output_depth, filter_shape.Dims(filter_dim_count - 2)); - const int accum_depth = filter_shape.Dims(filter_dim_count - 1); - for (int b = 0; b < batches; ++b) { - for (int out_c = 0; out_c < output_depth; ++out_c) { - int64_t acc = 0; - for (int d = 0; d < accum_depth; ++d) { - int32_t input_val = input_data[b * accum_depth + d]; - int32_t filter_val = filter_data[out_c * accum_depth + d]; - acc += (filter_val + filter_offset) * input_val; - } - if (bias_data) { - acc += bias_data[out_c]; - } - int32_t acc_scaled = - MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift); - acc_scaled = std::max(acc_scaled, output_activation_min); - acc_scaled = std::min(acc_scaled, output_activation_max); - output_data[out_c + output_depth * b] = static_cast(acc_scaled); - } - } -} - -} // namespace reference_integer_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h deleted file mode 100644 index 31f2de98..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_ - -#include "tensorflow/lite/kernels/internal/common.h" - -namespace tflite { -namespace reference_integer_ops { - -inline void L2Normalization(int32_t input_zero_point, int32_t outer_size, - int32_t depth, const int8_t* input_data, - int8_t* output_data) { - static constexpr int8_t kMinInt8 = std::numeric_limits::min(); - static constexpr int8_t kMaxInt8 = std::numeric_limits::max(); - // The output scale must be in sync with Prepare(). - // Output is in 1/128 scale so the actual output range is nudged from [-1, 1] - // to [-1, 127/128]. - static constexpr int32_t kOutputScale = 7; - for (int outer_index = 0; outer_index < outer_size; ++outer_index) { - // int32_t = (int8_t - int8_t) ^ 2. - // ([-128, 127] - [-128, 127]) ^ 2 = [0, (2^8 - 1)^2] so the accumulator is - // safe from overflowing in at least 2^16 steps. 
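// A plain floating-point model of what the surrounding quantized
// L2Normalization computes: out ~= round(128 * (x - zero_point) / ||x - zero_point||),
// clamped to int8 (the 1/128 output scale corresponds to kOutputScale = 7).
// Reference sketch only, useful for sanity-checking the fixed-point path.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<std::int8_t> L2NormalizeReference(const std::vector<std::int8_t>& x,
                                               std::int32_t zero_point) {
  double sum_sq = 0.0;
  for (std::int8_t v : x) {
    const double d = static_cast<double>(v) - zero_point;
    sum_sq += d * d;
  }
  const double inv_norm = sum_sq > 0.0 ? 1.0 / std::sqrt(sum_sq) : 0.0;
  std::vector<std::int8_t> out(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    const double d = static_cast<double>(x[i]) - zero_point;
    const long q = std::lround(128.0 * d * inv_norm);
    out[i] = static_cast<std::int8_t>(std::min(127L, std::max(-128L, q)));
  }
  return out;
}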
- int32_t acc = 0; - for (int inner_index = 0; inner_index < depth; ++inner_index) { - int32_t input = - input_data[depth * outer_index + inner_index] - input_zero_point; - acc += input * input; - } - int32_t inv_l2norm_multiplier; - int inv_l2norm_shift; - GetInvSqrtQuantizedMultiplierExp(acc, kReverseShift, &inv_l2norm_multiplier, - &inv_l2norm_shift); - - for (int inner_index = 0; inner_index < depth; ++inner_index) { - int32_t input = - input_data[depth * outer_index + inner_index] - input_zero_point; - - // Rescale and downcast. Rescale is folded into the division. - int32_t output_in_q24 = MultiplyByQuantizedMultiplier( - input, inv_l2norm_multiplier, inv_l2norm_shift + kOutputScale); - output_in_q24 = - std::min(static_cast(kMaxInt8), - std::max(static_cast(kMinInt8), output_in_q24)); - output_data[depth * outer_index + inner_index] = - static_cast(output_in_q24); - } - } -} -} // namespace reference_integer_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h deleted file mode 100644 index d1a15bd9..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +++ /dev/null @@ -1,106 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_ - -#include -#include "tensorflow/lite/kernels/internal/common.h" - -namespace tflite { -namespace reference_integer_ops { - -inline void Logistic(int32_t input_zero_point, int32_t input_range_radius, - int32_t input_multiplier, int32_t input_left_shift, - int32_t input_size, const int8_t* input_data, - int8_t* output_data) { - // Integer bits must be in sync with Prepare() function. 
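// Float model of the contract the surrounding int8 Logistic kernel implements.
// This assumes the usual TFLite convention of output scale 1/256 with zero
// point -128, so the sigmoid's [0, 1] range maps onto [-128, 127]; a sketch,
// not the fixed-point kernel itself.
#include <algorithm>
#include <cmath>
#include <cstdint>

std::int8_t LogisticInt8Reference(float x_real) {
  const float s = 1.0f / (1.0f + std::exp(-x_real));
  const long q = std::lround(s * 256.0) - 128;
  return static_cast<std::int8_t>(std::min(127L, std::max(-128L, q)));
}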
- static constexpr int32_t kInputIntegerBits = 4; - static constexpr int32_t kOutputIntegerBits = 8; - static constexpr int8_t kMinInt8 = std::numeric_limits::min(); - static constexpr int8_t kMaxInt8 = std::numeric_limits::max(); - static constexpr int32_t kOutputZeroPoint = -128; - - for (int i = 0; i < input_size; ++i) { - const int32_t input = - static_cast(input_data[i]) - input_zero_point; - if (input <= -input_range_radius) { - output_data[i] = kMinInt8; - } else if (input >= input_range_radius) { - output_data[i] = kMaxInt8; - } else { - const int32_t input_in_q4 = MultiplyByQuantizedMultiplier( - input, input_multiplier, input_left_shift); - using FixedPoint4 = gemmlowp::FixedPoint; - const int32_t output_in_q0 = - gemmlowp::logistic(FixedPoint4::FromRaw(input_in_q4)).raw(); - - // Rescale and downcast. - using gemmlowp::RoundingDivideByPOT; - int32_t output_in_q23 = - RoundingDivideByPOT(output_in_q0, 31 - kOutputIntegerBits); - output_in_q23 = std::min(std::max(output_in_q23 + kOutputZeroPoint, - static_cast(kMinInt8)), - static_cast(kMaxInt8)); - output_data[i] = static_cast(output_in_q23); - } - } -} - -inline void Logistic(int32_t input_multiplier, int32_t input_size, - const int16_t* ptr_input_data, int16_t* ptr_output_data) { - // We use the LUT for sigmoid and take into account, that - // tanh(x) = 2*sigmoid(2*x) - 1 - - int32_t input_data_mul = (input_multiplier > 0) ? input_multiplier : 1; - - for (int i = 0; i < input_size; ++i, ptr_input_data++, ptr_output_data++) { - int32_t input_data = (*ptr_input_data) * input_data_mul; - - // Scale by 3/4 to expand range [-8,8]->[-10.7,10.7] and - // we do interpolation on unsigned values. - uint32_t abs_input_data = 3 * abs(input_data); - - // We divide by 2 power of 9, because - // we need to divide by 2 in power of 7 for - // the input conversion + 1/4 from the scale above. - // Define uh as uint32_t type not to make this function overflow. - uint32_t uh = abs_input_data >> 9; - uint32_t result; - - if (uh >= 255) { - // Saturate to maximum. - result = 0x7FFF << 10; - } else { - uint32_t ua = sigmoid_table_uint16[uh]; - uint32_t ub = sigmoid_table_uint16[uh + 1]; - uint32_t ut = abs_input_data & 0x1ff; - // Interpolation is done using the fractional bit. - result = (ua << 9) + ut * (ub - ua); - } - - result = (input_data >= 0) ? (result + (1 << 9)) - : ((1 << (16 + 9)) - result + (1 << 9) - 1); - - // Back to 16-bit. - result >>= 10; - - *ptr_output_data = result; - } -} - -} // namespace reference_integer_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h deleted file mode 100644 index bd484270..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +++ /dev/null @@ -1,77 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_ - -#include "tensorflow/lite/kernels/internal/common.h" - -namespace tflite { -namespace reference_integer_ops { - -template -inline void Mean(const tflite::MeanParams& op_params, int32_t multiplier, - int32_t shift, const RuntimeShape& unextended_input_shape, - const integer_type* input_data, int32_t input_zero_point, - const RuntimeShape& unextended_output_shape, - integer_type* output_data, int32_t output_zero_point) { - // Current implementation only supports dimension equals 4 and simultaneous - // reduction over width and height. - TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4); - TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4); - const RuntimeShape input_shape = - RuntimeShape::ExtendedShape(4, unextended_input_shape); - const RuntimeShape output_shape = - RuntimeShape::ExtendedShape(4, unextended_output_shape); - const int output_batch = output_shape.Dims(0); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int output_depth = output_shape.Dims(3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int num_elements_in_axis = input_width * input_height; - - TFLITE_CHECK_EQ(op_params.axis_count, 2); - TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) || - (op_params.axis[0] == 2 && op_params.axis[1] == 1)); - TFLITE_CHECK_EQ(output_height, 1); - TFLITE_CHECK_EQ(output_width, 1); - - static constexpr int32_t kMinInt = std::numeric_limits::min(); - static constexpr int32_t kMaxInt = std::numeric_limits::max(); - - for (int out_b = 0; out_b < output_batch; ++out_b) { - for (int out_d = 0; out_d < output_depth; ++out_d) { - int32_t acc = 0; - for (int in_h = 0; in_h < input_height; ++in_h) { - for (int in_w = 0; in_w < input_width; ++in_w) { - acc += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)] - - input_zero_point; - } - } - acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift); - acc = acc > 0 ? (acc + num_elements_in_axis / 2) / num_elements_in_axis - : (acc - num_elements_in_axis / 2) / num_elements_in_axis; - acc += output_zero_point; - acc = std::min(std::max(acc, kMinInt), kMaxInt); - output_data[Offset(output_shape, out_b, 0, 0, out_d)] = - static_cast(acc); - } - } -} - -} // namespace reference_integer_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h deleted file mode 100644 index b80838aa..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +++ /dev/null @@ -1,131 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. 
All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_ - -#include "fixedpoint/fixedpoint.h" -#include "ruy/profiler/instrumentation.h" // from @ruy -#include "tensorflow/lite/kernels/internal/common.h" - -namespace tflite { -namespace reference_integer_ops { - -template -inline void MulElementwise(int size, const ArithmeticParams& params, - const T* input1_data, const T* input2_data, - T* output_data) { - for (int i = 0; i < size; ++i) { - const int32_t input1_val = params.input1_offset + input1_data[i]; - const int32_t input2_val = params.input2_offset + input2_data[i]; - const int32_t unclamped_result = - params.output_offset + - MultiplyByQuantizedMultiplier(input1_val * input2_val, - params.output_multiplier, - params.output_shift); - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, unclamped_result)); - output_data[i] = static_cast(clamped_output); - } -} - -template -inline void Mul(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const T* input1_data, - const RuntimeShape& input2_shape, const T* input2_data, - const RuntimeShape& output_shape, T* output_data) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - ruy::profiler::ScopeLabel label("Mul/8bit"); - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - - MulElementwise(flat_size, params, input1_data, input2_data, output_data); -} - -// Mul with 16 bit inputs and int8_t outputs. -inline void Mul(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const int16_t* input1_data, - const RuntimeShape& input2_shape, const int16_t* input2_data, - const RuntimeShape& output_shape, int8_t* output_data) { - ruy::profiler::ScopeLabel label("Mul/Int16Int8"); - int32_t output_offset = params.output_offset; - int32_t output_activation_min = params.quantized_activation_min; - int32_t output_activation_max = params.quantized_activation_max; - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - - for (int i = 0; i < flat_size; i++) { - // F0 uses 0 integer bits, range [-1, 1]. 
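// The F0 type referenced in the comment above is a Q0.15 fixed-point format:
// int16 values representing [-1, 1). A minimal sketch of multiplying two
// Q0.15 numbers with rounding and saturation; the gemmlowp fixed-point types
// used below wrap comparable rounding arithmetic.
#include <algorithm>
#include <cstdint>

std::int16_t MulQ15(std::int16_t a, std::int16_t b) {
  const std::int32_t product = static_cast<std::int32_t>(a) * b;        // Q0.30
  std::int32_t r = (product + (1 << 14)) >> 15;                         // round back to Q0.15
  r = std::min<std::int32_t>(32767, std::max<std::int32_t>(-32768, r)); // saturate -1 * -1
  return static_cast<std::int16_t>(r);
}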
- using F0 = gemmlowp::FixedPoint; - - F0 unclamped_result = - F0::FromRaw(input1_data[i]) * F0::FromRaw(input2_data[i]); - int16_t rescaled_result = - gemmlowp::RoundingDivideByPOT(unclamped_result.raw(), 8); - int16_t clamped_result = std::min( - output_activation_max - output_offset, rescaled_result); - clamped_result = std::max(output_activation_min - output_offset, - clamped_result); - output_data[i] = output_offset + clamped_result; - } -} - -template -inline void BroadcastMul4DSlow( - const ArithmeticParams& params, const RuntimeShape& input1_shape, - const T* input1_data, const RuntimeShape& input2_shape, - const T* input2_data, const RuntimeShape& output_shape, T* output_data) { - ruy::profiler::ScopeLabel label("BroadcastMul4DSlow"); - - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - // The input shapes are extended as part of NdArrayDesc initialization. - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); - - for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - const int32_t input1_val = - params.input1_offset + - input1_data[SubscriptToIndex(desc1, b, y, x, c)]; - const int32_t input2_val = - params.input2_offset + - input2_data[SubscriptToIndex(desc2, b, y, x, c)]; - const int32_t unclamped_result = - params.output_offset + - MultiplyByQuantizedMultiplier(input1_val * input2_val, - params.output_multiplier, - params.output_shift); - const int32_t clamped_output = std::min( - params.quantized_activation_max, - std::max(params.quantized_activation_min, unclamped_result)); - output_data[Offset(extended_output_shape, b, y, x, c)] = - static_cast(clamped_output); - } - } - } - } -} - -} // namespace reference_integer_ops -} // namespace tflite -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h deleted file mode 100644 index 17944bc4..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +++ /dev/null @@ -1,258 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_ - -#include -#include "tensorflow/lite/kernels/internal/common.h" - -namespace tflite { -namespace reference_integer_ops { - -inline void AveragePool(const PoolParams& params, - const RuntimeShape& input_shape, - const int8_t* input_data, - const RuntimeShape& output_shape, int8_t* output_data) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int stride_height = params.stride_height; - const int stride_width = params.stride_width; - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int channel = 0; channel < depth; ++channel) { - const int in_x_origin = - (out_x * stride_width) - params.padding_values.width; - const int in_y_origin = - (out_y * stride_height) - params.padding_values.height; - // Compute the boundaries of the filter region clamped so as to - // ensure that the filter window fits in the input array. - const int filter_x_start = std::max(0, -in_x_origin); - const int filter_x_end = - std::min(params.filter_width, input_width - in_x_origin); - const int filter_y_start = std::max(0, -in_y_origin); - const int filter_y_end = - std::min(params.filter_height, input_height - in_y_origin); - int32_t acc = 0; - int filter_count = 0; - for (int filter_y = filter_y_start; filter_y < filter_y_end; - ++filter_y) { - for (int filter_x = filter_x_start; filter_x < filter_x_end; - ++filter_x) { - const int in_x = in_x_origin + filter_x; - const int in_y = in_y_origin + filter_y; - acc += - input_data[Offset(input_shape, batch, in_y, in_x, channel)]; - filter_count++; - } - } - // Round to the closest integer value. - acc = acc > 0 ? 
(acc + filter_count / 2) / filter_count - : (acc - filter_count / 2) / filter_count; - acc = std::max(acc, params.quantized_activation_min); - acc = std::min(acc, params.quantized_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, channel)] = - static_cast(acc); - } - } - } - } -} - -inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, - const int8_t* input_data, const RuntimeShape& output_shape, - int8_t* output_data) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - TFLITE_DCHECK_GE(params.quantized_activation_min, - std::numeric_limits::min()); - TFLITE_DCHECK_LE(params.quantized_activation_max, - std::numeric_limits::max()); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int stride_height = params.stride_height; - const int stride_width = params.stride_width; - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int channel = 0; channel < depth; ++channel) { - const int in_x_origin = - (out_x * stride_width) - params.padding_values.width; - const int in_y_origin = - (out_y * stride_height) - params.padding_values.height; - // Compute the boundaries of the filter region clamped so as to - // ensure that the filter window fits in the input array. - const int filter_x_start = std::max(0, -in_x_origin); - const int filter_x_end = - std::min(params.filter_width, input_width - in_x_origin); - const int filter_y_start = std::max(0, -in_y_origin); - const int filter_y_end = - std::min(params.filter_height, input_height - in_y_origin); - int8_t max = std::numeric_limits::lowest(); - for (int filter_y = filter_y_start; filter_y < filter_y_end; - ++filter_y) { - for (int filter_x = filter_x_start; filter_x < filter_x_end; - ++filter_x) { - const int in_x = in_x_origin + filter_x; - const int in_y = in_y_origin + filter_y; - max = std::max( - max, - input_data[Offset(input_shape, batch, in_y, in_x, channel)]); - } - } - max = std::max(max, params.quantized_activation_min); - max = std::min(max, params.quantized_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, channel)] = - static_cast(max); - } - } - } - } -} - -inline void AveragePool(const PoolParams& params, - const RuntimeShape& input_shape, - const int16_t* input_data, - const RuntimeShape& output_shape, - int16_t* output_data) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int stride_height = params.stride_height; - const int stride_width = params.stride_width; - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < 
output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int channel = 0; channel < depth; ++channel) { - const int in_x_origin = - (out_x * stride_width) - params.padding_values.width; - const int in_y_origin = - (out_y * stride_height) - params.padding_values.height; - // Compute the boundaries of the filter region clamped so as to - // ensure that the filter window fits in the input array. - const int filter_x_start = std::max(0, -in_x_origin); - const int filter_x_end = - std::min(params.filter_width, input_width - in_x_origin); - const int filter_y_start = std::max(0, -in_y_origin); - const int filter_y_end = - std::min(params.filter_height, input_height - in_y_origin); - int32_t acc = 0; - int filter_count = 0; - for (int filter_y = filter_y_start; filter_y < filter_y_end; - ++filter_y) { - for (int filter_x = filter_x_start; filter_x < filter_x_end; - ++filter_x) { - const int in_x = in_x_origin + filter_x; - const int in_y = in_y_origin + filter_y; - acc += - input_data[Offset(input_shape, batch, in_y, in_x, channel)]; - filter_count++; - } - } - // Round to the closest integer value. - acc = acc > 0 ? (acc + filter_count / 2) / filter_count - : (acc - filter_count / 2) / filter_count; - acc = std::max(acc, params.quantized_activation_min); - acc = std::min(acc, params.quantized_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, channel)] = - static_cast(acc); - } - } - } - } -} - -inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, - const int16_t* input_data, const RuntimeShape& output_shape, - int16_t* output_data) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - TFLITE_DCHECK_GE(params.quantized_activation_min, - std::numeric_limits::min()); - TFLITE_DCHECK_LE(params.quantized_activation_max, - std::numeric_limits::max()); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int stride_height = params.stride_height; - const int stride_width = params.stride_width; - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int channel = 0; channel < depth; ++channel) { - const int in_x_origin = - (out_x * stride_width) - params.padding_values.width; - const int in_y_origin = - (out_y * stride_height) - params.padding_values.height; - // Compute the boundaries of the filter region clamped so as to - // ensure that the filter window fits in the input array. 
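// Two helpers capturing what the pooling loops here do: clamp the filter
// window so only in-bounds pixels are visited, and divide the accumulator by
// the visited count with round-to-nearest. Illustrative sketch only; names
// are not from the deleted file.
#include <algorithm>
#include <cstdint>

struct FilterRange { int start; int end; };  // iterate k in [start, end)

// origin = out * stride - pad; valid k satisfies 0 <= origin + k < input_size.
inline FilterRange ClampFilterRange(int origin, int filter_size, int input_size) {
  return { std::max(0, -origin), std::min(filter_size, input_size - origin) };
}

inline std::int32_t DivideRoundNearest(std::int32_t acc, int count) {
  return acc > 0 ? (acc + count / 2) / count : (acc - count / 2) / count;
}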
- const int filter_x_start = std::max(0, -in_x_origin); - const int filter_x_end = - std::min(params.filter_width, input_width - in_x_origin); - const int filter_y_start = std::max(0, -in_y_origin); - const int filter_y_end = - std::min(params.filter_height, input_height - in_y_origin); - int16_t max = std::numeric_limits::lowest(); - for (int filter_y = filter_y_start; filter_y < filter_y_end; - ++filter_y) { - for (int filter_x = filter_x_start; filter_x < filter_x_end; - ++filter_x) { - const int in_x = in_x_origin + filter_x; - const int in_y = in_y_origin + filter_y; - max = std::max( - max, - input_data[Offset(input_shape, batch, in_y, in_x, channel)]); - } - } - max = std::max(max, params.quantized_activation_min); - max = std::min(max, params.quantized_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, channel)] = - static_cast(max); - } - } - } - } -} - -} // namespace reference_integer_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h deleted file mode 100644 index 81ff34fe..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_ - -#include - -#include "fixedpoint/fixedpoint.h" -#include "tensorflow/lite/kernels/internal/common.h" - -namespace tflite { -namespace reference_integer_ops { - -inline void Tanh(int32_t input_zero_point, int32_t input_range_radius, - int32_t input_multiplier, int32_t input_shift, - const RuntimeShape& input_shape, const int8_t* input_data, - const RuntimeShape& output_shape, int8_t* output_data) { - // Integer bits must be in sync with Prepare() function. 
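// Float model of the contract the surrounding int8 Tanh kernel implements.
// Assumes the usual convention of output scale 1/128 with zero point 0
// (kOutputScale = 7), so tanh's [-1, 1] range maps onto [-128, 127].
// Reference sketch only.
#include <algorithm>
#include <cmath>
#include <cstdint>

std::int8_t TanhInt8Reference(float x_real) {
  const long q = std::lround(std::tanh(x_real) * 128.0);
  return static_cast<std::int8_t>(std::min(127L, std::max(-128L, q)));
}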
- static constexpr int32_t kInputIntegerBits = 4; - static constexpr int32_t kOutputScale = 7; - static constexpr int32_t kMinInt8 = std::numeric_limits::min(); - static constexpr int32_t kMaxInt8 = std::numeric_limits::max(); - using F4 = gemmlowp::FixedPoint; - - const int flat_size = MatchingFlatSize(input_shape, output_shape); - - for (int i = 0; i < flat_size; ++i) { - const int32_t input = - static_cast(input_data[i]) - input_zero_point; - if (input <= -input_range_radius) { - output_data[i] = kMinInt8; - } else if (input >= input_range_radius) { - output_data[i] = kMaxInt8; - } else { - const int32_t input_in_q4 = - MultiplyByQuantizedMultiplier(input, input_multiplier, input_shift); - const int32_t output_in_q0 = - gemmlowp::tanh(F4::FromRaw(input_in_q4)).raw(); - - // Rescale and downcast. - using gemmlowp::RoundingDivideByPOT; - int32_t output_in_q24 = - RoundingDivideByPOT(output_in_q0, 31 - kOutputScale); - output_in_q24 = std::min(std::max(output_in_q24, kMinInt8), kMaxInt8); - output_data[i] = static_cast(output_in_q24); - } - } -} - -inline void Tanh(int32_t input_multiplier, int32_t input_left_shift, - const RuntimeShape& input_shape, const int16_t* ptr_input_data, - const RuntimeShape& output_shape, int16_t* ptr_output_data) { - // We use the LUT for sigmoid and take into account, that - // tanh(x) = 2*sigmoid(2*x) - 1 - - int32_t input_data_mul = (input_multiplier > 0) ? input_multiplier : 1; - - int flat_size = MatchingFlatSize(input_shape, output_shape); - - for (int i = 0; i < flat_size; ++i, ptr_input_data++, ptr_output_data++) { - int32_t input_data = (*ptr_input_data) * input_data_mul; - - if (input_left_shift == 1) { - input_data <<= 1; - } - - // Scale by 3/4 to expand range [-8,8]->[-10.7,10.7]. - uint32_t abs_input_data = 3 * abs(input_data); - uint32_t uh = abs_input_data >> 8; - int32_t result; - - if (uh >= 255) { - // Saturate to maximum. - result = 0xFFFF << 8; - } else { - uint32_t ua = sigmoid_table_uint16[uh]; - uint32_t ub = sigmoid_table_uint16[uh + 1]; - - uint8_t ut = abs_input_data & 0xFF; - - result = (ua << 8) + ut * (ub - ua); - } - - result = (input_data >= 0) - ? (result - (1 << (14 + 9)) + (1 << (9 - 2))) - : (-result + (1 << (14 + 9)) + (1 << (9 - 2)) - 1); - - // Convert back to 16-bit. - result >>= (9 - 1); - - *ptr_output_data = result; - } -} - -} // namespace reference_integer_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h deleted file mode 100644 index 284c0f21..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +++ /dev/null @@ -1,221 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_ - -#include "tensorflow/lite/kernels/internal/common.h" - -namespace tflite { -namespace reference_integer_ops { - -// Fixed-point per-channel-quantization transpose convolution reference kernel. -inline void TransposeConv( - const ConvParams& params, const int32_t* output_multiplier, - const int32_t* output_shift, const RuntimeShape& input_shape, - const int8_t* input_data, const RuntimeShape& filter_shape, - const int8_t* filter_data, const RuntimeShape& bias_shape, - const int32_t* bias_data, const RuntimeShape& output_shape, - int8_t* output_data, const RuntimeShape& im2col_shape, int8_t* im2col_data, - int32_t* scratch_buffer) { - const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - (void)im2col_data; // only used in optimized code. - (void)im2col_shape; // only used in optimized code. - - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); - const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); - if (bias_data) { - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - } - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int32_t input_offset = params.input_offset; - const int32_t output_offset = params.output_offset; - const int32_t output_activation_min = std::numeric_limits::min(); - const int32_t output_activation_max = std::numeric_limits::max(); - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - - const int num_elements = output_shape.FlatSize(); - // We need to initialize scratch_buffer to all 0s, as we apply the same - // 'scatter' based trick as in float version. - memset(scratch_buffer, 0, num_elements * sizeof(int32_t)); - - // Loop through input elements one at a time. - for (int batch = 0; batch < batches; ++batch) { - for (int in_y = 0; in_y < input_height; ++in_y) { - for (int in_x = 0; in_x < input_width; ++in_x) { - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - // Loop through the output elements it will influence. - const int out_x_origin = (in_x * stride_width) - pad_width; - const int out_y_origin = (in_y * stride_height) - pad_height; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - for (int out_channel = 0; out_channel < output_depth; - ++out_channel) { - // Compute output element location. - const int out_x = out_x_origin + filter_x; - const int out_y = out_y_origin + filter_y; - // We cannot accumulate out of bounds. 
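// A 1-D analogue of the scatter-and-accumulate scheme used here: zero a wide
// scratch accumulator, scatter input*filter products into the output
// positions each input element influences (out = in*stride - pad + k), then
// requantize in a second pass. Sketch only; names are illustrative.
#include <algorithm>
#include <cstdint>

void TransposeConv1DScatter(const std::int8_t* input, int input_size,
                            const std::int8_t* filter, int filter_size,
                            int stride, int pad, std::int32_t input_offset,
                            std::int32_t* scratch, int output_size) {
  std::fill(scratch, scratch + output_size, 0);
  for (int in = 0; in < input_size; ++in) {
    const int out_origin = in * stride - pad;
    for (int k = 0; k < filter_size; ++k) {
      const int out = out_origin + k;
      if (out >= 0 && out < output_size) {  // skip out-of-bounds targets
        scratch[out] += (static_cast<std::int32_t>(input[in]) + input_offset) *
                        static_cast<std::int32_t>(filter[k]);
      }
    }
  }
}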
- if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) && - (out_y < output_height)) { - const int8_t input_value = input_data[Offset( - input_shape, batch, in_y, in_x, in_channel)]; - const int8_t filter_value = - filter_data[Offset(filter_shape, out_channel, filter_y, - filter_x, in_channel)]; - scratch_buffer[Offset(output_shape, batch, out_y, out_x, - out_channel)] += - (input_value + input_offset) * filter_value; - } - } - } - } - } - } - } - } - - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int out_channel = 0; out_channel < output_depth; ++out_channel) { - int32_t acc = scratch_buffer[Offset(output_shape, batch, out_y, out_x, - out_channel)]; - if (bias_data) { - acc += bias_data[out_channel]; - } - acc = MultiplyByQuantizedMultiplier( - acc, output_multiplier[out_channel], output_shift[out_channel]); - acc += output_offset; - acc = std::max(acc, output_activation_min); - acc = std::min(acc, output_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = - static_cast(acc); - } - } - } - } -} - -// int16_t input (zero_point=0), int8_t filter, int64 accumulator -inline void TransposeConv( - const ConvParams& params, const int32_t* output_multiplier, - const int32_t* output_shift, const RuntimeShape& input_shape, - const int16_t* input_data, const RuntimeShape& filter_shape, - const int8_t* filter_data, const RuntimeShape& bias_shape, - const std::int64_t* bias_data, const RuntimeShape& output_shape, - int16_t* output_data, const RuntimeShape& im2col_shape, int8_t* im2col_data, - std::int64_t* scratch_buffer) { - const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - (void)im2col_data; // only used in optimized code. - (void)im2col_shape; // only used in optimized code. - - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); - const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); - if (bias_data) { - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - } - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int32_t output_activation_min = std::numeric_limits::min(); - const int32_t output_activation_max = std::numeric_limits::max(); - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - - const int num_elements = output_shape.FlatSize(); - // We need to initialize scratch_buffer to all 0s, as we apply the same - // 'scatter' based trick as in float version. - memset(scratch_buffer, 0, num_elements * sizeof(std::int64_t)); - - // Loop through input elements one at a time. - for (int batch = 0; batch < batches; ++batch) { - for (int in_y = 0; in_y < input_height; ++in_y) { - for (int in_x = 0; in_x < input_width; ++in_x) { - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - // Loop through the output elements it will influence. 
- const int out_x_origin = (in_x * stride_width) - pad_width; - const int out_y_origin = (in_y * stride_height) - pad_height; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - for (int out_channel = 0; out_channel < output_depth; - ++out_channel) { - // Compute output element location. - const int out_x = out_x_origin + filter_x; - const int out_y = out_y_origin + filter_y; - // We cannot accumulate out of bounds. - if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) && - (out_y < output_height)) { - const int32_t input_value = input_data[Offset( - input_shape, batch, in_y, in_x, in_channel)]; - const int32_t filter_value = - filter_data[Offset(filter_shape, out_channel, filter_y, - filter_x, in_channel)]; - scratch_buffer[Offset(output_shape, batch, out_y, out_x, - out_channel)] += - input_value * filter_value; - } - } - } - } - } - } - } - } - - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int out_channel = 0; out_channel < output_depth; ++out_channel) { - std::int64_t acc = scratch_buffer[Offset(output_shape, batch, out_y, - out_x, out_channel)]; - if (bias_data) { - acc += bias_data[out_channel]; - } - int32_t scaled_acc = MultiplyByQuantizedMultiplier( - acc, output_multiplier[out_channel], output_shift[out_channel]); - scaled_acc = std::max(scaled_acc, output_activation_min); - scaled_acc = std::min(scaled_acc, output_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = - static_cast(scaled_acc); - } - } - } - } -} - -} // namespace reference_integer_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/l2normalization.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/l2normalization.h deleted file mode 100644 index 7587d2b5..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/l2normalization.h +++ /dev/null @@ -1,90 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_L2NORMALIZATION_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_L2NORMALIZATION_H_ - -#include -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -inline void L2Normalization(const tflite::L2NormalizationParams& op_params, - const RuntimeShape& input_shape, - const float* input_data, - const RuntimeShape& output_shape, - float* output_data, float epsilon = 1e-6) { - const int trailing_dim = input_shape.DimensionsCount() - 1; - const int outer_size = - MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); - const int depth = - MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); - for (int i = 0; i < outer_size; ++i) { - float squared_l2_norm = 0; - for (int c = 0; c < depth; ++c) { - const float val = input_data[depth * i + c]; - squared_l2_norm += val * val; - } - float l2_norm = std::sqrt(squared_l2_norm); - l2_norm = std::max(l2_norm, epsilon); - for (int c = 0; c < depth; ++c) { - output_data[depth * i + c] = input_data[depth * i + c] / l2_norm; - } - } -} - -inline void L2Normalization(const tflite::L2NormalizationParams& op_params, - const RuntimeShape& input_shape, - const uint8_t* input_data, - const RuntimeShape& output_shape, - uint8_t* output_data) { - const int trailing_dim = input_shape.DimensionsCount() - 1; - const int depth = - MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); - const int outer_size = - MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); - const int32_t input_zero_point = op_params.input_zero_point; - - for (int i = 0; i < outer_size; ++i) { - int32_t square_l2_norm = 0; - for (int c = 0; c < depth; c++) { - int32_t diff = input_data[depth * i + c] - input_zero_point; - square_l2_norm += diff * diff; - } - int32_t inv_l2norm_multiplier; - int inv_l2norm_shift; - GetInvSqrtQuantizedMultiplierExp(square_l2_norm, kReverseShift, - &inv_l2norm_multiplier, &inv_l2norm_shift); - for (int c = 0; c < depth; c++) { - int32_t diff = input_data[depth * i + c] - input_zero_point; - int32_t rescaled_diff = MultiplyByQuantizedMultiplierSmallerThanOneExp( - 128 * diff, inv_l2norm_multiplier, inv_l2norm_shift); - int32_t unclamped_output_val = 128 + rescaled_diff; - int32_t output_val = - std::min(static_cast(255), - std::max(static_cast(0), unclamped_output_val)); - output_data[depth * i + c] = static_cast(output_val); - } - } -} - -} // namespace reference_ops -} // namespace tflite -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_L2NORMALIZATION_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/logistic.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/logistic.h deleted file mode 100644 index 64b7133b..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/logistic.h +++ /dev/null @@ -1,132 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOGISTIC_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOGISTIC_H_ - -#include - -#include "fixedpoint/fixedpoint.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/cppmath.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/op_macros.h" - -namespace tflite { -namespace reference_ops { - -inline void Logistic(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { - const float cutoff_upper = 16.619047164916992188f; - const float cutoff_lower = -9.f; - - const int flat_size = MatchingFlatSize(input_shape, output_shape); - - // Rational for using approximation in reference kernel. - // 0. This approximation gives enough precision for float. - // 1. This works around an issue on an embedded chipset where exp() does not - // return correctly as expected - exp(x) should return inf when overflown - // not 1.701417 IEEE 754 defines representation for inf. - // 2. This will speed up calculation and is matching the behavior in the - // optimized kernels. (check the definition of scalar_logistic_op) - - for (int i = 0; i < flat_size; i++) { - float val = input_data[i]; - float result; - if (val > cutoff_upper) { - result = 1.0f; - } else if (val < cutoff_lower) { - result = std::exp(val); - } else { - result = 1.f / (1.f + std::exp(-val)); - } - output_data[i] = result; - } -} - -// Convenience version that allows, for example, generated-code calls to be -// uniform between data types. -inline void Logistic(const LogisticParams&, const RuntimeShape& input_shape, - const float* input_data, const RuntimeShape& output_shape, - float* output_data) { - // Drop params: not needed. - Logistic(input_shape, input_data, output_shape, output_data); -} - -inline void Logistic(const LogisticParams& params, - const RuntimeShape& input_shape, const int16_t* input_data, - const RuntimeShape& output_shape, int16_t* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - - for (int i = 0; i < flat_size; i++) { - // F0 uses 0 integer bits, range [-1, 1]. - // This is the return type of math functions such as tanh, logistic, - // whose range is in [-1, 1]. - using F0 = gemmlowp::FixedPoint; - // F3 uses 3 integer bits, range [-8, 8], the input range expected here. - using F3 = gemmlowp::FixedPoint; - - const F3 input = F3::FromRaw(input_data[i]); - F0 output = gemmlowp::logistic(input); - output_data[i] = output.raw(); - } -} - -// Quantized int8_t logistic activation. Cheats by dequantizing and -// requantizing around the floating point logistic method. This implementation -// is slow on platforms without a floating point unit. - -// TODO(b/141211002): Delete this int8_t implementation once we can reuse the -// approach used in TFLite for int8_t Logistic. 
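// The fallback below works by dequantizing, applying the float sigmoid, and
// requantizing. Generic helpers of that kind look roughly like this; note the
// kernel below casts the requantized value directly (truncating, no clamp)
// rather than rounding as this sketch does.
#include <algorithm>
#include <cmath>
#include <cstdint>

inline float DequantizeInt8(std::int8_t q, float scale, std::int32_t zero_point) {
  return (static_cast<std::int32_t>(q) - zero_point) * scale;
}

inline std::int8_t QuantizeInt8(float x, float scale, std::int32_t zero_point) {
  long q = std::lround(x / scale) + zero_point;
  q = std::min(127L, std::max(-128L, q));
  return static_cast<std::int8_t>(q);
}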
-inline void Logistic(const RuntimeShape& input_shape, const int8_t* input_data, - float input_scale, int input_zero_point, - const RuntimeShape& output_shape, int8_t* output_data, - float output_scale, int output_zero_point) { - const float cutoff_upper = 16.619047164916992188f; - const float cutoff_lower = -9.f; - - const int flat_size = MatchingFlatSize(input_shape, output_shape); - - // Rational for using approximation in reference kernel. - // 0. This approximation gives enough precision for float. - // 1. This works around an issue on an embedded chipset where exp() does not - // return correctly as expected - exp(x) should return inf when overflown - // not 1.701417 IEEE 754 defines representation for inf. - // 2. This will speed up calculation and is matching the behavior in the - // optimized kernels. (check the definition of scalar_logistic_op) - - for (int i = 0; i < flat_size; i++) { - // Dequantize. - float val = - static_cast((input_data[i] - input_zero_point) * input_scale); - float result; - if (val > cutoff_upper) { - result = 1.0f; - } else if (val < cutoff_lower) { - result = std::exp(val); - } else { - result = 1.f / (1.f + std::exp(-val)); - } - // Requantize - int8_t output = - static_cast(result / output_scale + output_zero_point); - output_data[i] = output; - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOGISTIC_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/maximum_minimum.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/maximum_minimum.h deleted file mode 100644 index cd11b419..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/maximum_minimum.h +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_ - -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { -namespace reference_ops { - -template -void MaximumMinimumBroadcastSlow(const RuntimeShape& unextended_input1_shape, - const T* input1_data, - const RuntimeShape& unextended_input2_shape, - const T* input2_data, - const RuntimeShape& unextended_output_shape, - T* output_data, Op op) { - // Uses element-wise calculation if broadcast is not required. 
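// The fast path referred to above: when the two input shapes match exactly,
// the binary op is applied elementwise with no broadcast index arithmetic.
// A minimal generic sketch of that path.
#include <algorithm>
#include <cstdint>

template <typename T, typename Op>
void ElementwiseBinary(const T* input1, const T* input2, T* output, int size, Op op) {
  for (int i = 0; i < size; ++i) {
    output[i] = op(input1[i], input2[i]);
  }
}

// Example: elementwise maximum of two int8 buffers.
// ElementwiseBinary(a, b, out, n,
//                   [](std::int8_t x, std::int8_t y) { return std::max(x, y); });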
- if (unextended_input1_shape == unextended_input2_shape) { - const int flat_size = - MatchingElementsSize(unextended_input1_shape, unextended_input2_shape, - unextended_output_shape); - for (int i = 0; i < flat_size; ++i) { - output_data[i] = op(input1_data[i], input2_data[i]); - } - } else { - TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N); - - NdArrayDesc desc1; - NdArrayDesc desc2; - NdArrayDesc output_desc; - NdArrayDescsForElementwiseBroadcast( - unextended_input1_shape, unextended_input2_shape, &desc1, &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape), - &output_desc); - - auto maxmin_func = [&](int indexes[N]) { - output_data[SubscriptToIndex(output_desc, indexes)] = - op(input1_data[SubscriptToIndex(desc1, indexes)], - input2_data[SubscriptToIndex(desc2, indexes)]); - }; - NDOpsHelper(output_desc, maxmin_func); - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/mul.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/mul.h deleted file mode 100644 index 0578b81b..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/mul.h +++ /dev/null @@ -1,166 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_ - -#include "tensorflow/lite/kernels/internal/common.h" - -namespace tflite { - -namespace reference_ops { - -// Element-wise mul that can often be used for inner loop of broadcast Mul as -// well as the non-broadcast Mul. 
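Editor's note: the MulElementwise routine that follows performs real-valued multiplication entirely in integer arithmetic: each operand is shifted by its zero point, the product is rescaled by output_multiplier/output_shift (a fixed-point encoding of, roughly, s1*s2/s_out), and the output offset is added back. A float-domain sketch of that identity with assumed quantization parameters (illustrative only, not the TFLite fixed-point path):

// Sketch: dequantize-multiply-requantize, which the integer kernel reproduces
// with MultiplyByQuantizedMultiplier instead of the float 'effective_scale'.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  // Assumed example quantization parameters.
  const float s1 = 0.02f, s2 = 0.05f, s_out = 0.05f;
  const int32_t z1 = 3, z2 = -7, z_out = 5;
  const uint8_t q1 = 130, q2 = 58;

  const float real1 = s1 * (static_cast<int32_t>(q1) - z1);  // 2.54
  const float real2 = s2 * (static_cast<int32_t>(q2) - z2);  // 3.25
  const float real_product = real1 * real2;                  // ~8.255

  // Same result expressed directly on the output quantized grid.
  const float effective_scale = (s1 * s2) / s_out;
  const int32_t raw = static_cast<int32_t>(q1 - z1) * static_cast<int32_t>(q2 - z2);
  const int32_t q_out = static_cast<int32_t>(std::lround(raw * effective_scale)) + z_out;
  const int32_t clamped = std::min<int32_t>(255, std::max<int32_t>(0, q_out));

  std::printf("real product %.4f ~= dequantized output %.4f\n",
              real_product, s_out * (clamped - z_out));
  return 0;
}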
-inline void MulElementwise(int size, const ArithmeticParams& params, - const uint8_t* input1_data, - const uint8_t* input2_data, uint8_t* output_data) { - for (int i = 0; i < size; ++i) { - const int32_t input1_val = params.input1_offset + input1_data[i]; - const int32_t input2_val = params.input2_offset + input2_data[i]; - const int32_t unclamped_result = - params.output_offset + - MultiplyByQuantizedMultiplier(input1_val * input2_val, - params.output_multiplier, - params.output_shift); - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, unclamped_result)); - output_data[i] = static_cast(clamped_output); - } -} - -template -inline void Mul(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const T* input1_data, - const RuntimeShape& input2_shape, const T* input2_data, - const RuntimeShape& output_shape, T* output_data) { - T output_activation_min; - T output_activation_max; - GetActivationParams(params, &output_activation_min, &output_activation_max); - - const int flat_size = - MatchingFlatSize(input1_shape, input2_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - output_data[i] = ActivationFunctionWithMinMax( - input1_data[i] * input2_data[i], output_activation_min, - output_activation_max); - } -} - -inline void Mul(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const uint8_t* input1_data, - const RuntimeShape& input2_shape, const uint8_t* input2_data, - const RuntimeShape& output_shape, uint8_t* output_data) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - const int flat_size = - MatchingFlatSize(input1_shape, input2_shape, output_shape); - - MulElementwise(flat_size, params, input1_data, input2_data, output_data); -} - -inline void BroadcastMul4DSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const uint8_t* input1_data, - const RuntimeShape& input2_shape, - const uint8_t* input2_data, - const RuntimeShape& output_shape, - uint8_t* output_data) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); - - for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - const int32_t input1_val = - params.input1_offset + - input1_data[SubscriptToIndex(desc1, b, y, x, c)]; - const int32_t input2_val = - params.input2_offset + - input2_data[SubscriptToIndex(desc2, b, y, x, c)]; - const int32_t unclamped_result = - params.output_offset + - MultiplyByQuantizedMultiplier(input1_val * input2_val, - params.output_multiplier, - params.output_shift); - const int32_t clamped_output = std::min( - params.quantized_activation_max, - std::max(params.quantized_activation_min, unclamped_result)); - output_data[Offset(extended_output_shape, b, y, x, c)] = - static_cast(clamped_output); - } - } - } - } -} - -template -void BroadcastMul4DSlow(const ArithmeticParams& params, - const RuntimeShape& unextended_input1_shape, - const T* input1_data, - const RuntimeShape& unextended_input2_shape, - const T* input2_data, - const RuntimeShape& unextended_output_shape, - T* output_data) { - T output_activation_min; - T output_activation_max; - GetActivationParams(params, 
&output_activation_min, &output_activation_max); - - TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); - const RuntimeShape output_shape = - RuntimeShape::ExtendedShape(4, unextended_output_shape); - - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, - unextended_input2_shape, &desc1, &desc2); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. - for (int b = 0; b < output_shape.Dims(0); ++b) { - for (int y = 0; y < output_shape.Dims(1); ++y) { - for (int x = 0; x < output_shape.Dims(2); ++x) { - for (int c = 0; c < output_shape.Dims(3); ++c) { - output_data[Offset(output_shape, b, y, x, c)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, b, y, x, c)] * - input2_data[SubscriptToIndex(desc2, b, y, x, c)], - output_activation_min, output_activation_max); - } - } - } - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/neg.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/neg.h deleted file mode 100644 index e127883f..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/neg.h +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ - -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -template -inline void Negate(const RuntimeShape& input_shape, const T* input_data, - const RuntimeShape& output_shape, T* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - - for (int i = 0; i < flat_size; ++i) { - output_data[i] = -input_data[i]; - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/pad.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/pad.h deleted file mode 100644 index 2a040cef..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/pad.h +++ /dev/null @@ -1,162 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PAD_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PAD_H_ - -#include - -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -// TFLite Pad supports activation tensors with up to 4 dimensions. -constexpr int PadKernelMaxDimensionCount() { return 4; } - -// There are two versions of pad: Pad and PadV2. In PadV2 there is a second -// scalar input that provides the padding value. Therefore pad_value_ptr can be -// equivalent to a simple input1_data. For Pad, it should point to a zero -// value. -// -// Note that two typenames are required, so that T=P=int32_t is considered a -// specialization distinct from P=int32_t. -template -inline void PadImpl(const tflite::PadParams& op_params, - const RuntimeShape& input_shape, const T* input_data, - const P* pad_value_ptr, const RuntimeShape& output_shape, - T* output_data) { - const RuntimeShape ext_input_shape = - RuntimeShape::ExtendedShape(PadKernelMaxDimensionCount(), input_shape); - const RuntimeShape ext_output_shape = - RuntimeShape::ExtendedShape(PadKernelMaxDimensionCount(), output_shape); - TFLITE_DCHECK_LE(op_params.left_padding_count, PadKernelMaxDimensionCount()); - TFLITE_DCHECK_LE(op_params.right_padding_count, PadKernelMaxDimensionCount()); - - // Runtime calls are currently fixed at 4 dimensions. Copy inputs so we can - // pad them to 4 dims (yes, we are "padding the padding"). 
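Editor's note: the two copy loops that follow right-align a lower-rank padding spec into fixed 4-entry arrays, which is what the "padding the padding" comment refers to. A minimal sketch of that alignment for an assumed 2-D input (values are hypothetical and independent of the TFLite types):

// Sketch: a 2-D padding spec {top, left} = {1, 2} becomes {0, 0, 1, 2} once
// right-aligned into the 4-D kernel layout (batch, height, width, depth).
#include <cstdio>

int main() {
  constexpr int kMaxDims = 4;
  const int left_padding[] = {1, 2};  // assumed 2-D spec
  const int left_padding_count = 2;

  int left_padding_copy[kMaxDims] = {0, 0, 0, 0};
  for (int i = 0; i < left_padding_count; ++i) {
    left_padding_copy[i + kMaxDims - left_padding_count] = left_padding[i];
  }

  for (int i = 0; i < kMaxDims; ++i) {
    std::printf("%d ", left_padding_copy[i]);  // prints: 0 0 1 2
  }
  std::printf("\n");
  return 0;
}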
- int left_padding_copy[PadKernelMaxDimensionCount()]; - for (int i = 0; i < PadKernelMaxDimensionCount(); i++) { - left_padding_copy[i] = 0; - } - for (int i = 0; i < op_params.left_padding_count; ++i) { - left_padding_copy[i + PadKernelMaxDimensionCount() - - op_params.left_padding_count] = op_params.left_padding[i]; - } - int right_padding_copy[PadKernelMaxDimensionCount()]; - for (int i = 0; i < PadKernelMaxDimensionCount(); i++) { - right_padding_copy[i] = 0; - } - for (int i = 0; i < op_params.right_padding_count; ++i) { - right_padding_copy[i + PadKernelMaxDimensionCount() - - op_params.right_padding_count] = - op_params.right_padding[i]; - } - - const int output_batch = ext_output_shape.Dims(0); - const int output_height = ext_output_shape.Dims(1); - const int output_width = ext_output_shape.Dims(2); - const int output_depth = ext_output_shape.Dims(3); - - const int left_b_padding = left_padding_copy[0]; - const int left_h_padding = left_padding_copy[1]; - const int left_w_padding = left_padding_copy[2]; - const int left_d_padding = left_padding_copy[3]; - - const int right_b_padding = right_padding_copy[0]; - const int right_h_padding = right_padding_copy[1]; - const int right_w_padding = right_padding_copy[2]; - const int right_d_padding = right_padding_copy[3]; - - const T pad_value = *pad_value_ptr; - - const T* in_ptr = input_data; - T* out_ptr = output_data; - for (int out_b = 0; out_b < output_batch; ++out_b) { - for (int out_h = 0; out_h < output_height; ++out_h) { - for (int out_w = 0; out_w < output_width; ++out_w) { - for (int out_d = 0; out_d < output_depth; ++out_d) { - if (out_b < left_b_padding || - out_b >= output_batch - right_b_padding || - out_h < left_h_padding || - out_h >= output_height - right_h_padding || - out_w < left_w_padding || - out_w >= output_width - right_w_padding || - out_d < left_d_padding || - out_d >= output_depth - right_d_padding) { - *out_ptr++ = pad_value; - } else { - *out_ptr++ = *in_ptr++; - } - } - } - } - } -} - -template -inline void Pad(const tflite::PadParams& op_params, - const RuntimeShape& input_shape, const T* input_data, - const P* pad_value_ptr, const RuntimeShape& output_shape, - T* output_data) { - PadImpl(op_params, input_shape, input_data, pad_value_ptr, output_shape, - output_data); -} - -// The second (pad-value) input can be int32_t when, say, the first is uint8_t. -template -inline void Pad(const tflite::PadParams& op_params, - const RuntimeShape& input_shape, const T* input_data, - const int32_t* pad_value_ptr, const RuntimeShape& output_shape, - T* output_data) { - const T converted_pad_value = static_cast(*pad_value_ptr); - PadImpl(op_params, input_shape, input_data, &converted_pad_value, - output_shape, output_data); -} - -// This version avoids conflicting template matching. 
-template <> -inline void Pad(const tflite::PadParams& op_params, - const RuntimeShape& input_shape, const int32_t* input_data, - const int32_t* pad_value_ptr, const RuntimeShape& output_shape, - int32_t* output_data) { - PadImpl(op_params, input_shape, input_data, pad_value_ptr, output_shape, - output_data); -} - -template -inline void PadImageStyle(const tflite::PadParams& op_params, - const RuntimeShape& input_shape, const T* input_data, - const P* pad_value_ptr, - const RuntimeShape& output_shape, T* output_data) { - Pad(op_params, input_shape, input_data, pad_value_ptr, output_shape, - output_data); -} - -template -inline void PadImageStyle(const tflite::PadParams& op_params, - const RuntimeShape& input_shape, - const float* input_data, const P* pad_value_ptr, - const RuntimeShape& output_shape, - float* output_data) { - Pad(op_params, input_shape, input_data, pad_value_ptr, output_shape, - output_data); -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PAD_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/pooling.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/pooling.h deleted file mode 100644 index 0872f521..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/pooling.h +++ /dev/null @@ -1,297 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_POOLING_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_POOLING_H_ - -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/cppmath.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { -namespace reference_ops { - -inline void AveragePool(const PoolParams& params, - const RuntimeShape& input_shape, - const float* input_data, - const RuntimeShape& output_shape, float* output_data) { - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int stride_height = params.stride_height; - const int stride_width = params.stride_width; - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int channel = 0; channel < depth; ++channel) { - const int in_x_origin = - (out_x * stride_width) - params.padding_values.width; - const int in_y_origin = - (out_y * stride_height) - params.padding_values.height; - // Compute the boundaries of the filter region clamped so as to - // ensure that the filter window fits in the input array. - const int filter_x_start = std::max(0, -in_x_origin); - const int filter_x_end = - std::min(params.filter_width, input_width - in_x_origin); - const int filter_y_start = std::max(0, -in_y_origin); - const int filter_y_end = - std::min(params.filter_height, input_height - in_y_origin); - float total = 0.f; - float filter_count = 0; - for (int filter_y = filter_y_start; filter_y < filter_y_end; - ++filter_y) { - for (int filter_x = filter_x_start; filter_x < filter_x_end; - ++filter_x) { - const int in_x = in_x_origin + filter_x; - const int in_y = in_y_origin + filter_y; - total += - input_data[Offset(input_shape, batch, in_y, in_x, channel)]; - filter_count++; - } - } - const float average = total / filter_count; - output_data[Offset(output_shape, batch, out_y, out_x, channel)] = - ActivationFunctionWithMinMax(average, params.float_activation_min, - params.float_activation_max); - } - } - } - } -} - -inline void AveragePool(const PoolParams& params, - const RuntimeShape& input_shape, - const uint8_t* input_data, - const RuntimeShape& output_shape, - uint8_t* output_data) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int stride_height = params.stride_height; - const int stride_width = params.stride_width; - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < 
output_width; ++out_x) { - for (int channel = 0; channel < depth; ++channel) { - const int in_x_origin = - (out_x * stride_width) - params.padding_values.width; - const int in_y_origin = - (out_y * stride_height) - params.padding_values.height; - // Compute the boundaries of the filter region clamped so as to - // ensure that the filter window fits in the input array. - const int filter_x_start = std::max(0, -in_x_origin); - const int filter_x_end = - std::min(params.filter_width, input_width - in_x_origin); - const int filter_y_start = std::max(0, -in_y_origin); - const int filter_y_end = - std::min(params.filter_height, input_height - in_y_origin); - int32_t acc = 0; - int filter_count = 0; - for (int filter_y = filter_y_start; filter_y < filter_y_end; - ++filter_y) { - for (int filter_x = filter_x_start; filter_x < filter_x_end; - ++filter_x) { - const int in_x = in_x_origin + filter_x; - const int in_y = in_y_origin + filter_y; - acc += - input_data[Offset(input_shape, batch, in_y, in_x, channel)]; - filter_count++; - } - } - acc = (acc + filter_count / 2) / filter_count; - acc = std::max(acc, params.quantized_activation_min); - acc = std::min(acc, params.quantized_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, channel)] = - static_cast(acc); - } - } - } - } -} - -inline void L2Pool(const PoolParams& params, const RuntimeShape& input_shape, - const float* input_data, const RuntimeShape& output_shape, - float* output_data) { - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int stride_height = params.stride_height; - const int stride_width = params.stride_width; - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int channel = 0; channel < depth; ++channel) { - const int in_x_origin = - (out_x * stride_width) - params.padding_values.width; - const int in_y_origin = - (out_y * stride_height) - params.padding_values.height; - // Compute the boundaries of the filter region clamped so as to - // ensure that the filter window fits in the input array. 
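Editor's note: every pooling variant in this header clamps its window with the same filter_x_start / filter_x_end (and y) bounds so only in-bounds pixels are visited. A standalone sketch for an assumed edge position (not TFLite code; the sizes are illustrative):

// Sketch: a 3-wide filter whose origin lands at column -1 (e.g. the first
// output with stride 1 and padding 1) is clamped to visit only columns 0 and 1.
#include <algorithm>
#include <cstdio>

int main() {
  const int filter_width = 3;
  const int input_width = 8;
  const int in_x_origin = -1;  // out_x * stride - padding for the first output

  const int filter_x_start = std::max(0, -in_x_origin);                        // 1
  const int filter_x_end = std::min(filter_width, input_width - in_x_origin);  // 3

  for (int fx = filter_x_start; fx < filter_x_end; ++fx) {
    std::printf("visiting input column %d\n", in_x_origin + fx);  // 0, then 1
  }
  return 0;
}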
- const int filter_x_start = std::max(0, -in_x_origin); - const int filter_x_end = - std::min(params.filter_width, input_width - in_x_origin); - const int filter_y_start = std::max(0, -in_y_origin); - const int filter_y_end = - std::min(params.filter_height, input_height - in_y_origin); - float sum_squares = 0.f; - int filter_count = 0; - for (int filter_y = filter_y_start; filter_y < filter_y_end; - ++filter_y) { - for (int filter_x = filter_x_start; filter_x < filter_x_end; - ++filter_x) { - const int in_x = in_x_origin + filter_x; - const int in_y = in_y_origin + filter_y; - const float val = - input_data[Offset(input_shape, batch, in_y, in_x, channel)]; - sum_squares += val * val; - filter_count++; - } - } - const float l2pool_result = std::sqrt(sum_squares / filter_count); - output_data[Offset(output_shape, batch, out_y, out_x, channel)] = - ActivationFunctionWithMinMax(l2pool_result, - params.float_activation_min, - params.float_activation_max); - } - } - } - } -} - -inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, - const float* input_data, const RuntimeShape& output_shape, - float* output_data) { - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int stride_height = params.stride_height; - const int stride_width = params.stride_width; - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int channel = 0; channel < depth; ++channel) { - const int in_x_origin = - (out_x * stride_width) - params.padding_values.width; - const int in_y_origin = - (out_y * stride_height) - params.padding_values.height; - // Compute the boundaries of the filter region clamped so as to - // ensure that the filter window fits in the input array. 
- const int filter_x_start = std::max(0, -in_x_origin); - const int filter_x_end = - std::min(params.filter_width, input_width - in_x_origin); - const int filter_y_start = std::max(0, -in_y_origin); - const int filter_y_end = - std::min(params.filter_height, input_height - in_y_origin); - float max = std::numeric_limits::lowest(); - for (int filter_y = filter_y_start; filter_y < filter_y_end; - ++filter_y) { - for (int filter_x = filter_x_start; filter_x < filter_x_end; - ++filter_x) { - const int in_x = in_x_origin + filter_x; - const int in_y = in_y_origin + filter_y; - max = std::max( - max, - input_data[Offset(input_shape, batch, in_y, in_x, channel)]); - } - } - output_data[Offset(output_shape, batch, out_y, out_x, channel)] = - ActivationFunctionWithMinMax(max, params.float_activation_min, - params.float_activation_max); - } - } - } - } -} - -inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, - const uint8_t* input_data, const RuntimeShape& output_shape, - uint8_t* output_data) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - TFLITE_DCHECK_GE(params.quantized_activation_min, 0); - TFLITE_DCHECK_LE(params.quantized_activation_max, 255); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int stride_height = params.stride_height; - const int stride_width = params.stride_width; - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int channel = 0; channel < depth; ++channel) { - const int in_x_origin = - (out_x * stride_width) - params.padding_values.width; - const int in_y_origin = - (out_y * stride_height) - params.padding_values.height; - // Compute the boundaries of the filter region clamped so as to - // ensure that the filter window fits in the input array. 
- const int filter_x_start = std::max(0, -in_x_origin); - const int filter_x_end = - std::min(params.filter_width, input_width - in_x_origin); - const int filter_y_start = std::max(0, -in_y_origin); - const int filter_y_end = - std::min(params.filter_height, input_height - in_y_origin); - uint8_t max = 0; - for (int filter_y = filter_y_start; filter_y < filter_y_end; - ++filter_y) { - for (int filter_x = filter_x_start; filter_x < filter_x_end; - ++filter_x) { - const int in_x = in_x_origin + filter_x; - const int in_y = in_y_origin + filter_y; - max = std::max( - max, - input_data[Offset(input_shape, batch, in_y, in_x, channel)]); - } - } - max = std::max(max, params.quantized_activation_min); - max = std::min(max, params.quantized_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, channel)] = - static_cast(max); - } - } - } - } -} -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_POOLING_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/prelu.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/prelu.h deleted file mode 100644 index 02db5174..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/prelu.h +++ /dev/null @@ -1,109 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_ - -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -// Broadcast prelu to output_shape for quantized uint8_t/int8_t data. 
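Editor's note: the quantized PReLU below carries two requantization multipliers because the two branches have different effective scales: the positive branch only rescales the input (roughly s_in/s_out), while the negative branch rescales an input-times-alpha product (roughly s_in*s_alpha/s_out). A float-domain sketch with assumed parameters (illustrative only):

// Sketch: why quantized PReLU needs output_multiplier_1 (identity branch) and
// output_multiplier_2 (alpha branch): the branches rescale by different factors.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  // Assumed quantization parameters, all zero points set to 0 for brevity.
  const float s_in = 0.1f, s_alpha = 0.02f, s_out = 0.05f;

  const float scale_identity = s_in / s_out;           // what multiplier_1/shift_1 encode
  const float scale_alpha = (s_in * s_alpha) / s_out;  // what multiplier_2/shift_2 encode

  // A negative input x = -2.0 with alpha = 0.5 should give y = -1.0.
  const int8_t q_x = -20;  // -2.0 / 0.1
  const int8_t q_a = 25;   //  0.5 / 0.02

  const int32_t q_y = static_cast<int32_t>(std::lround(q_x * q_a * scale_alpha));
  const int32_t clamped = std::min<int32_t>(127, std::max<int32_t>(-128, q_y));

  std::printf("identity scale %.2f, alpha scale %.3f, PReLU(-2.0) ~ %.2f\n",
              scale_identity, scale_alpha, s_out * clamped);  // ... -1.00
  return 0;
}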
-template -inline void BroadcastPrelu4DSlow( - const PreluParams& params, const RuntimeShape& input_shape, - const T* input_data, const RuntimeShape& alpha_shape, const T* alpha_data, - const RuntimeShape& output_shape, T* output_data) { - TFLITE_DCHECK_LE(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(alpha_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(output_shape.DimensionsCount(), 4); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(input_shape, alpha_shape, &desc1, &desc2); - - for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - int output_index = Offset(extended_output_shape, b, y, x, c); - int input_index = SubscriptToIndex(desc1, b, y, x, c); - const int32_t input_value = - params.input_offset + input_data[input_index]; - int32_t output_value; - if (input_value >= 0) { - output_value = MultiplyByQuantizedMultiplier( - input_value, params.output_multiplier_1, params.output_shift_1); - } else { - auto alpha_index = SubscriptToIndex(desc2, b, y, x, c); - const int32_t alpha_value = - params.alpha_offset + alpha_data[alpha_index]; - - output_value = MultiplyByQuantizedMultiplier( - input_value * alpha_value, params.output_multiplier_2, - params.output_shift_2); - } - output_value += params.output_offset; - - const int32_t quantized_min = std::numeric_limits::min(); - const int32_t quantized_max = std::numeric_limits::max(); - const int32_t clamped_output = - std::min(quantized_max, std::max(quantized_min, output_value)); - output_data[output_index] = static_cast(clamped_output); - } - } - } - } -} - -template -inline void Prelu(const PreluParams& params, const RuntimeShape& input_shape, - const T* input_data, const RuntimeShape& alpha_shape, - const T* alpha_data, const RuntimeShape& output_shape, - T* output_data) { - const int32_t quantized_min = std::numeric_limits::min(); - const int32_t quantized_max = std::numeric_limits::max(); - - const int flat_size = - MatchingElementsSize(input_shape, alpha_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - const int32_t input_value = params.input_offset + input_data[i]; - int32_t output_value; - if (input_value >= 0) { - output_value = MultiplyByQuantizedMultiplier( - input_value, params.output_multiplier_1, params.output_shift_1); - } else { - const int32_t alpha_value = params.alpha_offset + alpha_data[i]; - - output_value = MultiplyByQuantizedMultiplier(input_value * alpha_value, - params.output_multiplier_2, - params.output_shift_2); - } - output_value += params.output_offset; - - const int32_t clamped_output = - std::min(quantized_max, std::max(quantized_min, output_value)); - output_data[i] = static_cast(clamped_output); - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h deleted file mode 100644 index 40f779c5..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +++ 
/dev/null @@ -1,138 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PROCESS_BROADCAST_SHAPES_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PROCESS_BROADCAST_SHAPES_H_ - -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -// Consolidates dimensions in broadcast inputs, checks for five-fold pattern. -// -// For example, if sequence of dimensions of one input is -// ..., 1, 3, 1, 7, 9, 5,... and the other is ..., 2, 3, 1, 7, 1, 1, ... -// we can consolidate these as -// ..., 1, 3*7, 9*5, ... and 2, 3*7, 1. -// -// The category is updated in the less-frequent case of shapes that are -// not suited to a fivefold-loop broadcast. -// -// Falls back to generic pattern when it does not know how to process properly. -// -// Returns true iff there is some sort of broadcast, which includes five-fold -// patterns and falling back to generic broadcast. -inline bool ProcessBroadcastShapes(const RuntimeShape& shape0, - const RuntimeShape& shape1, - tflite::ArithmeticParams* params) { - const int dims_count = - std::max(shape0.DimensionsCount(), shape1.DimensionsCount()); - - params->broadcast_category = BroadcastableOpCategory::kGenericBroadcast; - RuntimeShape scalar_shape(dims_count, 1); - - auto extended_shape0 = RuntimeShape::ExtendedShape(dims_count, shape0); - auto extended_shape1 = RuntimeShape::ExtendedShape(dims_count, shape1); - - // Check for "exact" match, implicitly accepting any scalar shapes. - if (extended_shape0 == extended_shape1) { - params->broadcast_category = BroadcastableOpCategory::kNonBroadcast; - return false; - } - - for (int i = dims_count - 1; i >= 0; --i) { - if (extended_shape0.Dims(i) == extended_shape1.Dims(i)) { - continue; - } else if (extended_shape0.Dims(i) == 1) { - params->broadcast_category = - BroadcastableOpCategory::kFirstInputBroadcastsFast; - break; - } else if (extended_shape1.Dims(i) == 1) { - params->broadcast_category = - BroadcastableOpCategory::kSecondInputBroadcastsFast; - break; - } else { - // This case is erroneous: there is a dimension that does not match and - // is not a broadcast from one shape to the other. - params->broadcast_category = BroadcastableOpCategory::kGenericBroadcast; - return true; - } - } - - if (params->broadcast_category != - BroadcastableOpCategory::kFirstInputBroadcastsFast && - params->broadcast_category != - BroadcastableOpCategory::kSecondInputBroadcastsFast) { - // This is unreachable because at least one else clause in the above loop - // must be reached. - TFLITE_DCHECK(false); - params->broadcast_category = BroadcastableOpCategory::kNonBroadcast; - return false; - } - - // From this point it is assumed contractually that corresponding dimensions - // in shape0 and shape1 are either (a) equal or (b) one or other equals 1. 
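Editor's note: the run-grouping loops that follow collapse the trailing dimensions into at most five groups (broadcast_shape[0..4]), alternating between "both shapes match" runs and "one side is 1" runs. A worked example using the exact shapes from the header comment above (standalone arithmetic, not the TFLite helper):

// Worked example: shape0 = {1, 3, 1, 7, 9, 5}, shape1 = {2, 3, 1, 7, 1, 1}.
// Scanning from the trailing dimension:
//   broadcast_shape[4] = 1            (no trailing run where both dims match)
//   broadcast_shape[3] = 9 * 5 = 45   (shape1 contributes the 1s here)
//   broadcast_shape[2] = 3 * 1 * 7 = 21 (matching run)
//   broadcast_shape[1] = 2            (shape0 contributes the 1 here)
//   broadcast_shape[0] = 1
// i.e. the "1, 3*7, 9*5" versus "2, 3*7, 1" consolidation described above.
#include <cstdio>

int main() {
  const int groups[5] = {1, 2, 21, 45, 1};
  long long broadcast_elements = 1;
  for (int g : groups) broadcast_elements *= g;
  // The fivefold loop covers the same 2*3*1*7*9*5 = 1890 output elements.
  std::printf("elements covered by the fivefold loop: %lld\n", broadcast_elements);
  return 0;
}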
- const bool swap_inputs = params->broadcast_category == - BroadcastableOpCategory::kSecondInputBroadcastsFast; - const RuntimeShape* shape_a = - swap_inputs ? &extended_shape1 : &extended_shape0; - const RuntimeShape* shape_b = - swap_inputs ? &extended_shape0 : &extended_shape1; - - int i = dims_count - 1; - params->broadcast_shape[0] = 1; - params->broadcast_shape[1] = 1; - params->broadcast_shape[2] = 1; - params->broadcast_shape[3] = 1; - params->broadcast_shape[4] = 1; - // y_0 is greedy: include dims if both or neither equal 1: in other words, - // test for equality rather than (shape_a->Dims(i) != 1). - while (i >= 0 && shape_a->Dims(i) == shape_b->Dims(i)) { - params->broadcast_shape[4] *= shape_b->Dims(i); - --i; - } - // Here either input_a or input_b has dim of 1 (if i >= 0). If it is input_b - // that has the unit dimension, the next two loops are not entered. - while (i >= 0 && shape_a->Dims(i) == 1) { - params->broadcast_shape[3] *= shape_b->Dims(i); - --i; - } - while (i >= 0 && shape_a->Dims(i) == shape_b->Dims(i)) { - params->broadcast_shape[2] *= shape_a->Dims(i); - --i; - } - // Here either input_a or input_b has dim of 1 (if i >= 0). - while (i >= 0 && shape_b->Dims(i) == 1) { - params->broadcast_shape[1] *= shape_a->Dims(i); - --i; - } - while (i >= 0 && shape_a->Dims(i) == shape_b->Dims(i)) { - params->broadcast_shape[0] *= shape_b->Dims(i); - --i; - } - - // Rarer case is when the broadcast dimensions cannot be handled by a fivefold - // loop. - if (i >= 0) { - params->broadcast_category = BroadcastableOpCategory::kGenericBroadcast; - } - return true; -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PROCESS_BROADCAST_SHAPES_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/quantize.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/quantize.h deleted file mode 100644 index 6f3f9aeb..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/quantize.h +++ /dev/null @@ -1,55 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_ - -#include -#include - -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/cppmath.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -template -inline void AffineQuantize(const tflite::QuantizationParams& op_params, - const RuntimeShape& input_shape, - const InputT* input_data, - const RuntimeShape& output_shape, - OutputT* output_data) { - const int32_t zero_point = op_params.zero_point; - const double scale = op_params.scale; - const int flat_size = MatchingFlatSize(input_shape, output_shape); - static constexpr int32_t min_val = std::numeric_limits::min(); - static constexpr int32_t max_val = std::numeric_limits::max(); - - for (int i = 0; i < flat_size; i++) { - const InputT val = input_data[i]; - int32_t unclamped = - static_cast(TfLiteRound(val / static_cast(scale))) + - zero_point; - int32_t clamped = std::min(std::max(unclamped, min_val), max_val); - output_data[i] = clamped; - } -} - -} // namespace reference_ops - -} // namespace tflite -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/reduce.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/reduce.h deleted file mode 100644 index a7c86ddb..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/reduce.h +++ /dev/null @@ -1,412 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_ - -#include "ruy/profiler/instrumentation.h" // from @ruy -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/cppmath.h" -#include "tensorflow/lite/kernels/internal/max.h" -#include "tensorflow/lite/kernels/internal/min.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -// A generic reduce method that can be used for reduce_sum, reduce_mean, etc. -// This method iterates through input data and reduce elements along the -// dimensions given in axis. 
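Editor's note: AffineQuantize above maps a real value onto the integer grid as round(x / scale) + zero_point and clamps to the output type's range. A minimal numeric sketch of the same mapping for int8 (values are illustrative, and std::lround stands in for TfLiteRound):

// Sketch: quantizing floats to int8 with scale 0.05 and zero_point -10.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int8_t AffineQuantizeOne(float x, float scale, int32_t zero_point) {
  const int32_t unclamped = static_cast<int32_t>(std::lround(x / scale)) + zero_point;
  const int32_t clamped = std::min<int32_t>(127, std::max<int32_t>(-128, unclamped));
  return static_cast<int8_t>(clamped);
}

int main() {
  const float scale = 0.05f;
  const int32_t zero_point = -10;
  std::printf("%d %d %d\n",
              AffineQuantizeOne(0.0f, scale, zero_point),    // -10 (the zero point)
              AffineQuantizeOne(1.0f, scale, zero_point),    //  10
              AffineQuantizeOne(100.f, scale, zero_point));  //  127 (saturated)
  return 0;
}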
-template -inline bool Reduce(const In* input_data, const int* input_dims, - const int* output_dims, const int input_num_dims, - const int output_num_dims, const int* axis, - const int num_axis, int* input_iter, - Out reducer(const Out current, const In in), - Out* output_data) { - // Reset input iterator. - for (int idx = 0; idx < input_num_dims; ++idx) { - input_iter[idx] = 0; - } - // Iterate through input_data. - do { - size_t input_offset = - ReducedOutputOffset(input_num_dims, input_dims, input_iter, 0, nullptr); - size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims, - input_iter, num_axis, axis); - output_data[output_offset] = - reducer(output_data[output_offset], input_data[input_offset]); - } while (NextIndex(input_num_dims, input_dims, input_iter)); - return true; -} - -// This method parses the input 'axis' to remove duplicates and handle negative -// values, and returns a valid 'out_axis' -inline bool ResolveAxis(const int num_dims, const int* axis, - const int64_t num_axis, int* out_axis, - int* out_num_axis) { - *out_num_axis = 0; // Just in case. - // Short-circuit axis resolution for scalars; the axis will go unused. - if (num_dims == 0) { - return true; - } - // o(n^2) is fine since out_num_axis should be really small, mostly <= 4 - for (int64_t idx = 0; idx < num_axis; ++idx) { - // Handle negative index. A positive index 'p_idx' can be represented as a - // negative index 'n_idx' as: n_idx = p_idx-num_dims - // eg: For num_dims=3, [0, 1, 2] is the same as [-3, -2, -1] */ - int current = axis[idx] < 0 ? (axis[idx] + num_dims) : axis[idx]; - TFLITE_DCHECK(current >= 0 && current < num_dims); - if (current < 0 || current >= num_dims) { - return false; - } - bool is_dup = false; - for (int j = 0; j < *out_num_axis; ++j) { - if (out_axis[j] == current) { - is_dup = true; - break; - } - } - if (!is_dup) { - out_axis[*out_num_axis] = current; - *out_num_axis += 1; - } - } - return true; -} - -// This method expects that output_data has been initialized. -template -inline bool ReduceSumImpl(const In* input_data, const int* input_dims, - const int* output_dims, const int input_num_dims, - const int output_num_dims, const int* axis, - const int num_axis, int* input_iter, - Out* output_data) { - auto reducer = [](const Out current, const In in) -> Out { - const Out actual_in = static_cast(in); - return current + actual_in; - }; - return Reduce(input_data, input_dims, output_dims, input_num_dims, - output_num_dims, axis, num_axis, input_iter, reducer, - output_data); -} - -template -inline bool InitTensorDataForReduce(const int* dims, const int num_dims, - const T init_value, T* data) { - size_t num_elements = 1; - for (int idx = 0; idx < num_dims; ++idx) { - size_t current = static_cast(dims[idx]); - // Overflow prevention. - if (num_elements > std::numeric_limits::max() / current) { - return false; - } - num_elements *= current; - } - for (size_t idx = 0; idx < num_elements; ++idx) { - data[idx] = init_value; - } - return true; -} - -// Computes the generic value (i.e., sum/max/min/prod) of elements across -// dimensions given in axis. It needs to pass in init_value and reducer. 
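Editor's note: ResolveAxis above canonicalizes the axis list by mapping negative indices (n_idx = p_idx - num_dims) and dropping duplicates. A small standalone illustration of that mapping for a rank-3 tensor (not the TFLite helper itself):

// Sketch: for num_dims = 3, the axis list {-1, 2, 0} resolves to {2, 0}:
// -1 maps to -1 + 3 = 2, the explicit 2 is then a duplicate, and 0 stays.
#include <cstdio>

int main() {
  const int num_dims = 3;
  const int axis[] = {-1, 2, 0};

  int out_axis[3];
  int out_num_axis = 0;
  for (int a : axis) {
    const int current = a < 0 ? a + num_dims : a;
    bool is_dup = false;
    for (int j = 0; j < out_num_axis; ++j) {
      if (out_axis[j] == current) { is_dup = true; break; }
    }
    if (!is_dup) out_axis[out_num_axis++] = current;
  }

  for (int j = 0; j < out_num_axis; ++j) std::printf("%d ", out_axis[j]);  // 2 0
  std::printf("\n");
  return 0;
}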
-template -inline bool ReduceGeneric(const T* input_data, const int* input_dims, - const int input_num_dims, T* output_data, - const int* output_dims, const int output_num_dims, - const int* axis, const int64_t num_axis_dimensions, - bool keep_dims, int* temp_index, int* resolved_axis, - T init_value, - T reducer(const T current, const T in)) { - // Return early when input shape has zero dim. - for (int i = 0; i < input_num_dims; ++i) { - if (input_dims[i] == 0) return true; - } - - // Reset output data. - if (!InitTensorDataForReduce(output_dims, output_num_dims, init_value, - output_data)) { - return false; - } - - // Resolve axis. - int num_resolved_axis = 0; - if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis, - &num_resolved_axis)) { - return false; - } - - return Reduce(input_data, input_dims, output_dims, input_num_dims, - output_num_dims, resolved_axis, num_resolved_axis, - temp_index, reducer, output_data); -} - -// Computes the mean of elements across dimensions given in axis. -// It does so in two stages, first calculates the sum of elements along the axis -// then divides it by the number of element in axis. -template -inline bool Mean(const T* input_data, const int* input_dims, - const int input_num_dims, T* output_data, - const int* output_dims, const int output_num_dims, - const int* axis, const int num_axis_dimensions, bool keep_dims, - int* temp_index, int* resolved_axis, U* temp_sum) { - ruy::profiler::ScopeLabel label("Mean"); - // Reset output data. - size_t num_outputs = 1; - for (int idx = 0; idx < output_num_dims; ++idx) { - size_t current = static_cast(output_dims[idx]); - // Overflow prevention. - if (num_outputs > std::numeric_limits::max() / current) { - return false; - } - num_outputs *= current; - } - for (size_t idx = 0; idx < num_outputs; ++idx) { - output_data[idx] = T(); - temp_sum[idx] = U(); - } - - // Resolve axis. - int num_resolved_axis = 0; - if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis, - &num_resolved_axis)) { - return false; - } - - if (!ReduceSumImpl(input_data, input_dims, output_dims, input_num_dims, - output_num_dims, resolved_axis, num_resolved_axis, - temp_index, temp_sum)) { - return false; - } - - // Calculate mean by dividing output_data by num of aggregated element. - size_t num_elements_in_axis = 1; - for (int idx = 0; idx < num_resolved_axis; ++idx) { - size_t current = static_cast(input_dims[resolved_axis[idx]]); - // Overflow prevention. - if (current > (std::numeric_limits::max() / num_elements_in_axis)) { - return false; - } - num_elements_in_axis *= current; - } - - if (num_elements_in_axis > 0) { - for (size_t idx = 0; idx < num_outputs; ++idx) { - output_data[idx] = - static_cast(temp_sum[idx] / static_cast(num_elements_in_axis)); - } - } - return true; -} - -template -inline void Mean(const tflite::MeanParams& op_params, - const RuntimeShape& unextended_input_shape, - const T* input_data, - const RuntimeShape& unextended_output_shape, T* output_data) { - ruy::profiler::ScopeLabel label("Mean4D"); - - // Current implementation only supports dimension equals 4 and simultaneous - // reduction over width and height. 
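Editor's note: the 4-D Mean that follows only handles the common NHWC case of averaging over height and width (axis {1, 2}), turning an [N, H, W, C] tensor into [N, 1, 1, C]. A tiny sketch of that reduction without the TFLite shape machinery (toy sizes, assumed interleaved NHWC layout):

// Sketch: mean over H and W of a [1, 2, 2, 2] NHWC tensor keeps batch and
// channel, giving output shape [1, 1, 1, 2].
#include <cstdio>

int main() {
  const int H = 2, W = 2, C = 2;
  // Interleaved NHWC layout: value(h, w, c) = input[(h * W + w) * C + c].
  const float input[H * W * C] = {1.f, 10.f, 2.f, 20.f, 3.f, 30.f, 4.f, 40.f};

  for (int c = 0; c < C; ++c) {
    float sum = 0.f;
    for (int h = 0; h < H; ++h)
      for (int w = 0; w < W; ++w)
        sum += input[(h * W + w) * C + c];
    std::printf("channel %d mean = %.1f\n", c, sum / (H * W));  // 2.5 and 25.0
  }
  return 0;
}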
- TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4); - TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4); - const RuntimeShape input_shape = - RuntimeShape::ExtendedShape(4, unextended_input_shape); - const RuntimeShape output_shape = - RuntimeShape::ExtendedShape(4, unextended_output_shape); - - const int output_batch = output_shape.Dims(0); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int output_depth = output_shape.Dims(3); - - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - - TFLITE_CHECK_EQ(op_params.axis_count, 2); - TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) || - (op_params.axis[0] == 2 && op_params.axis[1] == 1)); - TFLITE_CHECK_EQ(output_height, 1); - TFLITE_CHECK_EQ(output_width, 1); - - for (int out_b = 0; out_b < output_batch; ++out_b) { - for (int out_d = 0; out_d < output_depth; ++out_d) { - float value = 0; - for (int in_h = 0; in_h < input_height; ++in_h) { - for (int in_w = 0; in_w < input_width; ++in_w) { - value += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)]; - } - } - output_data[Offset(output_shape, out_b, 0, 0, out_d)] = - value / (input_width * input_height); - } - } -} - -inline void Mean(const tflite::MeanParams& op_params, - const RuntimeShape& unextended_input_shape, - const uint8_t* input_data, int32_t input_zero_point, - float input_scale, const RuntimeShape& unextended_output_shape, - uint8_t* output_data, int32_t output_zero_point, - float output_scale) { - ruy::profiler::ScopeLabel label("Mean4D/Uint8"); - - // Current implementation only supports dimension equals 4 and simultaneous - // reduction over width and height. - TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4); - TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4); - const RuntimeShape input_shape = - RuntimeShape::ExtendedShape(4, unextended_input_shape); - const RuntimeShape output_shape = - RuntimeShape::ExtendedShape(4, unextended_output_shape); - const int output_batch = output_shape.Dims(0); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int output_depth = output_shape.Dims(3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const float num_elements_in_axis = input_width * input_height; - - TFLITE_CHECK_EQ(op_params.axis_count, 2); - TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) || - (op_params.axis[0] == 2 && op_params.axis[1] == 1)); - TFLITE_CHECK_EQ(output_height, 1); - TFLITE_CHECK_EQ(output_width, 1); - - constexpr int32_t kMinValue = std::numeric_limits::min(); - constexpr int32_t kMaxValue = std::numeric_limits::max(); - - int32_t bias = - output_zero_point - - static_cast(input_zero_point * input_scale / output_scale); - double real_scale = - static_cast(input_scale / (num_elements_in_axis * output_scale)); - - int32_t multiplier; - int shift; - QuantizeMultiplier(real_scale, &multiplier, &shift); - for (int out_b = 0; out_b < output_batch; ++out_b) { - for (int out_d = 0; out_d < output_depth; ++out_d) { - int32_t acc = 0; - for (int in_h = 0; in_h < input_height; ++in_h) { - for (int in_w = 0; in_w < input_width; ++in_w) { - acc += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)]; - } - } - acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift); - acc += bias; - acc = std::min(std::max(acc, kMinValue), kMaxValue); - output_data[Offset(output_shape, out_b, 0, 
0, out_d)] = - static_cast(acc); - } - } -} - -// Computes the mean of elements across dimensions given in axis. -// It does so in two stages, first calculates the sum of elements along the axis -// then divides it by the number of element in axis for quantized values. -template -inline bool QuantizedMeanOrSum(const T* input_data, int32_t input_zero_point, - float input_scale, const int* input_dims, - const int input_num_dims, T* output_data, - int32_t output_zero_point, float output_scale, - const int* output_dims, - const int output_num_dims, const int* axis, - const int num_axis_dimensions, bool keep_dims, - int* temp_index, int* resolved_axis, U* temp_sum, - bool compute_sum) { - const bool uint8_case = std::is_same::value; - const bool int16_case = std::is_same::value; - if (uint8_case) { - ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Uint8" : "Mean/Uint8"); - } else if (int16_case) { - ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Int16" : "Mean/Int16"); - } else { - ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Int8" : "Mean/Int8"); - } - // Reset output data. - size_t num_outputs = 1; - for (int idx = 0; idx < output_num_dims; ++idx) { - size_t current = static_cast(output_dims[idx]); - // Overflow prevention. - if (num_outputs > std::numeric_limits::max() / current) { - return false; - } - num_outputs *= current; - } - for (size_t idx = 0; idx < num_outputs; ++idx) { - output_data[idx] = T(); - temp_sum[idx] = U(); - } - - // Resolve axis. - int num_resolved_axis = 0; - if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis, - &num_resolved_axis)) { - return false; - } - - if (!ReduceSumImpl(input_data, input_dims, output_dims, input_num_dims, - output_num_dims, resolved_axis, num_resolved_axis, - temp_index, temp_sum)) { - return false; - } - - // Calculate mean by dividing output_data by num of aggregated element. - size_t num_elements_in_axis = 1; - for (int idx = 0; idx < num_resolved_axis; ++idx) { - size_t current = static_cast(input_dims[resolved_axis[idx]]); - // Overflow prevention. - if (current > (std::numeric_limits::max() / num_elements_in_axis)) { - return false; - } - num_elements_in_axis *= current; - } - - if (num_elements_in_axis > 0) { - const float scale = input_scale / output_scale; - if (compute_sum) { - // TODO(b/116341117): Eliminate float and do this completely in 8bit. 
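Editor's note: the uint8 Mean above folds the whole dequantize-average-requantize chain into one rescale of the raw sum, using real_scale = s_in / (n * s_out) and a bias of roughly z_out - z_in * s_in / s_out (the kernel truncates the bias to int32 and encodes real_scale via QuantizeMultiplier). A float-domain check of that identity with assumed values (illustrative only):

// Sketch: averaging n quantized values and requantizing equals scaling the
// raw sum by s_in/(n*s_out) and adding the folded bias.
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  // Assumed quantization parameters and a 2x2 window of uint8 inputs.
  const float s_in = 0.1f, s_out = 0.2f;
  const int32_t z_in = 5, z_out = 3;
  const uint8_t q[] = {10, 20, 30, 40};
  const int n = 4;

  // Reference path: dequantize, average, requantize.
  float mean_real = 0.f;
  for (int i = 0; i < n; ++i) mean_real += s_in * (q[i] - z_in);
  mean_real /= n;
  const int32_t ref = static_cast<int32_t>(std::lround(mean_real / s_out)) + z_out;

  // Folded form used by the kernel.
  int32_t acc = 0;
  for (int i = 0; i < n; ++i) acc += q[i];
  const float real_scale = s_in / (n * s_out);
  const float bias = z_out - z_in * s_in / s_out;
  const int32_t folded = static_cast<int32_t>(std::lround(acc * real_scale + bias));

  std::printf("reference %d, folded %d\n",
              static_cast<int>(ref), static_cast<int>(folded));  // both 13
  return 0;
}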
- const float bias = -input_zero_point * scale * num_elements_in_axis; - for (size_t idx = 0; idx < num_outputs; ++idx) { - const U value = - static_cast(TfLiteRound(temp_sum[idx] * scale + bias)) + - output_zero_point; - output_data[idx] = static_cast(value); - } - } else { - const float bias = -input_zero_point * scale; - for (size_t idx = 0; idx < num_outputs; ++idx) { - float float_mean = static_cast(temp_sum[idx]) / - static_cast(num_elements_in_axis); - float result = TfLiteMin( - TfLiteRound(float_mean * scale + bias) + output_zero_point, - static_cast(std::numeric_limits::max())); - result = TfLiteMax(result, - static_cast(std::numeric_limits::min())); - output_data[idx] = static_cast(result); - } - } - } - return true; -} - -} // namespace reference_ops - -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/requantize.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/requantize.h deleted file mode 100644 index d1e67785..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/requantize.h +++ /dev/null @@ -1,68 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_ - -#include "ruy/profiler/instrumentation.h" // from @ruy -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { -namespace reference_ops { - -template -inline void Requantize(const input_type* input_data, int32_t size, - int32_t effective_scale_multiplier, - int32_t effective_scale_shift, int32_t input_zeropoint, - int32_t output_zeropoint, output_type* output_data) { - ruy::profiler::ScopeLabel label("Requantize"); - const bool same_scale = - (effective_scale_multiplier == 1 << 30 && effective_scale_shift == 1); - if (same_scale) { - const bool mixed_type_int8_uint8 = - std::is_same::value && - std::is_same::value; - const bool mixed_type_uint8_int8 = - std::is_same::value && - std::is_same::value; - const int32_t zero_point_diff = input_zeropoint - output_zeropoint; - // Fast path to do requantization for the case when just a shift of 128 is - // needed. 
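Editor's note: the fast path below relies on the fact that when the effective rescale is exactly 1 and the zero points differ by exactly 128, int8/uint8 requantization is just an offset of 128, and adding 128 to an 8-bit two's-complement value is the same as flipping its top bit. A standalone exhaustive check (independent of the TFLite types):

// Sketch: for int8 -> uint8 with zero_point_diff == -128, q_out = q_in + 128,
// and the kernel's "input ^ 0x80" shortcut produces the identical byte.
#include <cstdint>
#include <cstdio>

int main() {
  for (int v = -128; v <= 127; ++v) {
    const int8_t in = static_cast<int8_t>(v);
    const uint8_t via_offset = static_cast<uint8_t>(v + 128);
    const uint8_t via_xor = static_cast<uint8_t>(in ^ 0x80);  // the shortcut
    if (via_offset != via_xor) {
      std::printf("mismatch at %d\n", v);
      return 1;
    }
  }
  std::printf("XOR 0x80 matches the +128 offset for all 256 int8 values\n");
  return 0;
}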
- if ((mixed_type_int8_uint8 && zero_point_diff == -128) || - (mixed_type_uint8_int8 && zero_point_diff == 128)) { - for (int i = 0; i < size; ++i) { - output_data[i] = input_data[i] ^ 0x80; - } - return; - } - } - static constexpr int32_t kMinOutput = std::numeric_limits::min(); - static constexpr int32_t kMaxOutput = std::numeric_limits::max(); - for (int i = 0; i < size; ++i) { - const int32_t input = input_data[i] - input_zeropoint; - const int32_t output = - MultiplyByQuantizedMultiplier(input, effective_scale_multiplier, - effective_scale_shift) + - output_zeropoint; - const int32_t clamped_output = - std::max(std::min(output, kMaxOutput), kMinOutput); - output_data[i] = static_cast(clamped_output); - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h deleted file mode 100644 index 95550abc..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +++ /dev/null @@ -1,101 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_NEAREST_NEIGHBOR_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_NEAREST_NEIGHBOR_H_ - -#include - -#include "tensorflow/lite/kernels/internal/cppmath.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -inline int32_t GetNearestNeighbor(const int input_value, - const int32_t input_size, - const int32_t output_size, - const bool align_corners, - const bool half_pixel_centers) { - const float scale = - (align_corners && output_size > 1) - ? (input_size - 1) / static_cast(output_size - 1) - : input_size / static_cast(output_size); - const float offset = half_pixel_centers ? 0.5f : 0.0f; - int32_t output_value = std::min( - align_corners - ? 
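When the effective rescale is exactly 1, the requantization above reduces to shifting the zero point; for an int8/uint8 pair whose zero points differ by exactly 128, that shift is a sign-bit flip. A minimal standalone sketch of just that fast path (illustrative name):

#include <cstdint>

inline uint8_t RequantizeInt8ToUint8SameScale(int8_t x) {
  // Adding 128 to a two's-complement int8 bit pattern is the same as
  // flipping its top bit, which is what the deleted loop's `^ 0x80` does.
  return static_cast<uint8_t>(static_cast<uint8_t>(x) ^ 0x80u);
}
// Examples: -128 -> 0, 0 -> 128, 127 -> 255.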
static_cast(TfLiteRound((input_value + offset) * scale)) - : static_cast(std::floor((input_value + offset) * scale)), - input_size - 1); - if (half_pixel_centers) { - output_value = std::max(static_cast(0), output_value); - } - return output_value; -} - -template -inline void ResizeNearestNeighbor( - const tflite::ResizeNearestNeighborParams& op_params, - const RuntimeShape& unextended_input_shape, const T* input_data, - const RuntimeShape& output_size_shape, const int32_t* output_size_data, - const RuntimeShape& unextended_output_shape, T* output_data) { - TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); - - const RuntimeShape input_shape = - RuntimeShape::ExtendedShape(4, unextended_input_shape); - const RuntimeShape output_shape = - RuntimeShape::ExtendedShape(4, unextended_output_shape); - - int32_t batches = MatchingDim(input_shape, 0, output_shape, 0); - int32_t input_height = input_shape.Dims(1); - int32_t input_width = input_shape.Dims(2); - int32_t depth = MatchingDim(input_shape, 3, output_shape, 3); - - // The Tensorflow version of this op allows resize on the width and height - // axis only. - TFLITE_DCHECK_EQ(output_size_shape.FlatSize(), 2); - int32_t output_height = output_size_data[0]; - int32_t output_width = output_size_data[1]; - - const int col_offset = input_shape.Dims(3); - const int row_offset = input_shape.Dims(2) * col_offset; - const int batch_offset = input_shape.Dims(1) * row_offset; - - const T* input_ptr = input_data; - T* output_ptr = output_data; - for (int b = 0; b < batches; ++b) { - for (int y = 0; y < output_height; ++y) { - int32_t in_y = GetNearestNeighbor(y, input_height, output_height, - op_params.align_corners, - op_params.half_pixel_centers); - const T* y_input_ptr = input_ptr + in_y * row_offset; - for (int x = 0; x < output_width; ++x) { - int32_t in_x = GetNearestNeighbor(x, input_width, output_width, - op_params.align_corners, - op_params.half_pixel_centers); - const T* x_input_ptr = y_input_ptr + in_x * col_offset; - memcpy(output_ptr, x_input_ptr, depth * sizeof(T)); - output_ptr += depth; - } - } - input_ptr += batch_offset; - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_NEAREST_NEIGHBOR_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/round.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/round.h deleted file mode 100644 index 9bd8f3f2..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/round.h +++ /dev/null @@ -1,51 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
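GetNearestNeighbor above maps each output coordinate back to an input coordinate with a scale derived from the two sizes. A standalone sketch of the default mapping, assuming align_corners and half_pixel_centers are both false (NearestSourceIndex is an illustrative name):

#include <algorithm>
#include <cmath>
#include <cstdint>

int32_t NearestSourceIndex(int32_t out_index, int32_t in_size, int32_t out_size) {
  const float scale = in_size / static_cast<float>(out_size);
  // floor(out_index * scale), clamped to the last valid input index.
  const int32_t in_index = static_cast<int32_t>(std::floor(out_index * scale));
  return std::min(in_index, in_size - 1);
}
// Example: resizing 2 rows up to 5 maps outputs {0,1,2,3,4} to inputs {0,0,0,1,1}.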
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ - -#include - -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -inline float RoundToNearest(float value) { - auto floor_val = std::floor(value); - auto diff = value - floor_val; - if ((diff < 0.5f) || - ((diff == 0.5f) && (static_cast(floor_val) % 2 == 0))) { - return floor_val; - } else { - return floor_val = floor_val + 1.0f; - } -} - -inline void Round(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - // Note that this implementation matches that of tensorFlow tf.round - // and corresponds to the bankers rounding method. - // cfenv (for fesetround) is not yet supported universally on Android, so - // using a work around. - output_data[i] = RoundToNearest(input_data[i]); - } -} - -} // namespace reference_ops -} // namespace tflite -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ROUND_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/softmax.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/softmax.h deleted file mode 100644 index 1b3f1181..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/softmax.h +++ /dev/null @@ -1,232 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
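RoundToNearest above implements round-half-to-even (banker's rounding), matching tf.round without relying on fesetround. A compact sketch of the same rule with the tie case made explicit (illustrative name):

#include <cmath>

inline float RoundHalfToEven(float value) {
  const float floor_val = std::floor(value);
  const float diff = value - floor_val;
  if (diff < 0.5f) return floor_val;
  if (diff > 0.5f) return floor_val + 1.0f;
  // Exact tie: pick whichever neighbour is even.
  return (static_cast<int>(floor_val) % 2 == 0) ? floor_val : floor_val + 1.0f;
}
// Examples: 0.5 -> 0, 1.5 -> 2, 2.5 -> 2, -0.5 -> 0.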
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SOFTMAX_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SOFTMAX_H_ - -#include - -#include "fixedpoint/fixedpoint.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/cppmath.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/op_macros.h" - -namespace tflite { -namespace reference_ops { - -inline void Softmax(const SoftmaxParams& params, - const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { - const int trailing_dim = input_shape.DimensionsCount() - 1; - const int outer_size = - MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); - const int depth = - MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); - - for (int i = 0; i < outer_size; ++i) { - // Find max element value which we'll use to ensure numerical stability - // taking advantage of the following equality: - // exp(x[i])/sum(exp(x[i])) == exp(x[i]+C)/sum(exp(x[i]+C)) - float max = std::numeric_limits::lowest(); - for (int c = 0; c < depth; ++c) { - max = std::max(max, input_data[i * depth + c]); - } - - // Compute sum. - float sum = 0.f; - for (int c = 0; c < depth; ++c) { - const float exp_c = std::exp((input_data[i * depth + c] - max) * - static_cast(params.beta)); - output_data[i * depth + c] = exp_c; - sum += exp_c; - } - - // Compute result. - for (int c = 0; c < depth; ++c) { - output_data[i * depth + c] = output_data[i * depth + c] / sum; - } - } -} - -// Quantized softmax with int8_t/uint8_t input and int8_t/uint8_t/int16_t -// output. -template -inline void Softmax(const SoftmaxParams& params, - const RuntimeShape& input_shape, const InputT* input_data, - const RuntimeShape& output_shape, OutputT* output_data) { - const int32_t input_beta_multiplier = params.input_multiplier; - const int32_t input_beta_left_shift = params.input_left_shift; - const int diff_min = params.diff_min; - // The representation chosen for the input to the exp() function is Q5.26. - // We need to leave extra space since values that we skip might be as large as - // -32 before multiplying by input_beta_multiplier, and therefore as large as - // -16 afterwards. Note that exp(-8) is definitely not insignificant to - // accumulation, but exp(-16) definitely is. 
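The float Softmax above subtracts the row maximum before exponentiating, which leaves the result unchanged (exp(x+C)/sum(exp(x+C)) == exp(x)/sum(exp(x))) but keeps exp() from overflowing. A 1-D standalone sketch of the same computation (illustrative name, assumes a non-empty row):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> Softmax1D(const std::vector<float>& logits, float beta = 1.0f) {
  const float max_logit = *std::max_element(logits.begin(), logits.end());
  std::vector<float> out(logits.size());
  float sum = 0.0f;
  for (std::size_t i = 0; i < logits.size(); ++i) {
    out[i] = std::exp((logits[i] - max_logit) * beta);  // stable: exponent <= 0
    sum += out[i];
  }
  for (float& v : out) v /= sum;  // normalize so the row sums to 1
  return out;
}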
- static const int kScaledDiffIntegerBits = 5; - static const int kAccumulationIntegerBits = 12; - using FixedPointScaledDiff = - gemmlowp::FixedPoint; - using FixedPointAccum = - gemmlowp::FixedPoint; - using FixedPoint0 = gemmlowp::FixedPoint; - - const int trailing_dim = input_shape.DimensionsCount() - 1; - const int outer_size = - MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); - const int depth = - MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); - - for (int i = 0; i < outer_size; ++i) { - InputT max_in_row = std::numeric_limits::min(); - for (int c = 0; c < depth; ++c) { - max_in_row = std::max(max_in_row, input_data[i * depth + c]); - } - - FixedPointAccum sum_of_exps = FixedPointAccum::Zero(); - for (int c = 0; c < depth; ++c) { - int32_t input_diff = - static_cast(input_data[i * depth + c]) - max_in_row; - if (input_diff >= diff_min) { - const int32_t input_diff_rescaled = - MultiplyByQuantizedMultiplierGreaterThanOne( - input_diff, input_beta_multiplier, input_beta_left_shift); - const FixedPointScaledDiff scaled_diff_f8 = - FixedPointScaledDiff::FromRaw(input_diff_rescaled); - sum_of_exps = sum_of_exps + gemmlowp::Rescale( - exp_on_negative_values(scaled_diff_f8)); - } - } - - int num_bits_over_unit; - FixedPoint0 shifted_scale = FixedPoint0::FromRaw(GetReciprocal( - sum_of_exps.raw(), kAccumulationIntegerBits, &num_bits_over_unit)); - - for (int c = 0; c < depth; ++c) { - int32_t input_diff = - static_cast(input_data[i * depth + c]) - max_in_row; - if (input_diff >= diff_min) { - const int32_t input_diff_rescaled = - MultiplyByQuantizedMultiplierGreaterThanOne( - input_diff, input_beta_multiplier, input_beta_left_shift); - const FixedPointScaledDiff scaled_diff_f8 = - FixedPointScaledDiff::FromRaw(input_diff_rescaled); - - FixedPoint0 exp_in_0 = exp_on_negative_values(scaled_diff_f8); - int32_t unsat_output = gemmlowp::RoundingDivideByPOT( - (shifted_scale * exp_in_0).raw(), - num_bits_over_unit + 31 - (sizeof(OutputT) * 8)); - - const int32_t shifted_output = - unsat_output + - static_cast(std::numeric_limits::min()); - - output_data[i * depth + c] = static_cast(std::max( - std::min(shifted_output, - static_cast(std::numeric_limits::max())), - static_cast(std::numeric_limits::min()))); - } else { - output_data[i * depth + c] = std::numeric_limits::min(); - } - } - } -} - -// Computes exp(input - max_input) -inline int16_t SoftMaxCalculateExp(const SoftmaxParams& params, - const int16_t* input_data, const int depth, - int16_t max_in_row, int i, int c) { - int32_t input_diff = input_data[i * depth + c] - max_in_row; - // scale the input_diff such that [-65535, 0] correspond to [-10.0, 0.0] - // exp lut generated with range [-10, 0], as exp(-10) is negligible. - int32_t scaled_diff = MultiplyByQuantizedMultiplier( - input_diff, params.input_multiplier, params.input_left_shift); - // recenter to [-32768, 32767] - int32_t sym_scaled_diff = scaled_diff + 32767; - int16_t sat_sym_scaled_diff = - std::min(std::max(sym_scaled_diff, static_cast(-32768)), - static_cast(32767)); - // apply the exp() LUT activation function - return generic_int16_table_lookup(sat_sym_scaled_diff, params.exp_lut); -} -// Quantized softmax with int16_t input and int16_t output. 
-inline void SoftmaxInt16(const SoftmaxParams& params, - const RuntimeShape& input_shape, - const int16_t* input_data, - const RuntimeShape& output_shape, - int16_t* output_data) { - const int trailing_dim = input_shape.DimensionsCount() - 1; - const int outer_size = - MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); - const int depth = - MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); - - for (int i = 0; i < outer_size; ++i) { - // Find the largest element - int16_t max_in_row = std::numeric_limits::min(); - for (int c = 0; c < depth; ++c) { - max_in_row = std::max(max_in_row, input_data[i * depth + c]); - } - - // This loops computes the exp values and their sum. We will need the exp - // values later on in the function so we cache them in the output_data - // buffer. This is an optimization done to avoid calculating the exp values - // twice making use of the output_data buffer as scratch memory. - int32_t sum_of_exps = 0; // Q16.15 fixed point format. - int16_t* exp_results_Q015 = output_data + i * depth; - for (int c = 0; c < depth; ++c) { - exp_results_Q015[c] = - SoftMaxCalculateExp(params, input_data, depth, max_in_row, i, c); - sum_of_exps += exp_results_Q015[c]; - } - - // Compute the reciprocal 1/sum_of_exps - uint8_t headroom_plus_one = - CountLeadingZeros(static_cast(sum_of_exps)); - int32_t shifted_sum = - ((static_cast(sum_of_exps) << (headroom_plus_one - 1)) + - (1 << 13)) >> - 14; - // since the LUT computes 1/(1 + x) we need to first compute x = (sum - 1). - // also, the LUT expects a symmetrical input, so we must also recenter x - // from [0, 65535] to [-32768, 32767]. - int32_t sym_shifted_sum = shifted_sum + (-((1 << 15) + (1 << 16))); - int16_t sat_sym_shifted_sum = static_cast( - std::min(std::max(sym_shifted_sum, static_cast(-32768)), - static_cast(32767))); - // apply 1/(1 + x) LUT activation function - int16_t reciprocal_scale_Q015 = generic_int16_table_lookup( - sat_sym_shifted_sum, params.one_over_one_plus_x_lut); - - // Rescale the exp_result with reciprocal - // range of output is [0, 32767] correspond to [0.0, 1.0] - for (int c = 0; c < depth; ++c) { - uint8_t right_shift = 31 - headroom_plus_one; - int64_t round = 1 << (right_shift - 1); - int32_t result = (static_cast(exp_results_Q015[c]) * - static_cast(reciprocal_scale_Q015) + - round) >> - right_shift; - output_data[i * depth + c] = static_cast( - std::min(std::max(result, static_cast(0)), - static_cast(32767))); - } - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SOFTMAX_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/strided_slice.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/strided_slice.h deleted file mode 100644 index 40dc2e91..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/strided_slice.h +++ /dev/null @@ -1,121 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_ - -#include "ruy/profiler/instrumentation.h" // from @ruy -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/portable_tensor.h" -#include "tensorflow/lite/kernels/internal/strided_slice_logic.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -template -inline void StridedSlice(const tflite::StridedSliceParams& op_params, - const RuntimeShape& unextended_input_shape, - const RuntimeShape& unextended_output_shape, - SequentialTensorWriter* writer) { - using strided_slice::LoopCondition; - using strided_slice::StartForAxis; - using strided_slice::StopForAxis; - - ruy::profiler::ScopeLabel label("StridedSlice"); - - // Note that the output_shape is not used herein. - tflite::StridedSliceParams params_copy = op_params; - - TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 5); - TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 5); - const RuntimeShape input_shape = - RuntimeShape::ExtendedShape(5, unextended_input_shape); - const RuntimeShape output_shape = - RuntimeShape::ExtendedShape(5, unextended_output_shape); - - // Reverse and pad to 5 dimensions because that is what the runtime code - // requires (ie. all shapes must be 5D and are given backwards). 
- strided_slice::StridedSlicePadIndices(¶ms_copy, 5); - - const int start_0 = StartForAxis(params_copy, input_shape, 0); - const int stop_0 = StopForAxis(params_copy, input_shape, 0, start_0); - const int start_1 = StartForAxis(params_copy, input_shape, 1); - const int stop_1 = StopForAxis(params_copy, input_shape, 1, start_1); - const int start_2 = StartForAxis(params_copy, input_shape, 2); - const int stop_2 = StopForAxis(params_copy, input_shape, 2, start_2); - const int start_3 = StartForAxis(params_copy, input_shape, 3); - const int stop_3 = StopForAxis(params_copy, input_shape, 3, start_3); - const int start_4 = StartForAxis(params_copy, input_shape, 4); - const int stop_4 = StopForAxis(params_copy, input_shape, 4, start_4); - - for (int offset_0 = start_0 * input_shape.Dims(1), - end_0 = stop_0 * input_shape.Dims(1), - step_0 = params_copy.strides[0] * input_shape.Dims(1); - !LoopCondition(offset_0, end_0, params_copy.strides[0]); - offset_0 += step_0) { - for (int offset_1 = (offset_0 + start_1) * input_shape.Dims(2), - end_1 = (offset_0 + stop_1) * input_shape.Dims(2), - step_1 = params_copy.strides[1] * input_shape.Dims(2); - !LoopCondition(offset_1, end_1, params_copy.strides[1]); - offset_1 += step_1) { - for (int offset_2 = (offset_1 + start_2) * input_shape.Dims(3), - end_2 = (offset_1 + stop_2) * input_shape.Dims(3), - step_2 = params_copy.strides[2] * input_shape.Dims(3); - !LoopCondition(offset_2, end_2, params_copy.strides[2]); - offset_2 += step_2) { - for (int offset_3 = (offset_2 + start_3) * input_shape.Dims(4), - end_3 = (offset_2 + stop_3) * input_shape.Dims(4), - step_3 = params_copy.strides[3] * input_shape.Dims(4); - !LoopCondition(offset_3, end_3, params_copy.strides[3]); - offset_3 += step_3) { - for (int offset_4 = offset_3 + start_4, end_4 = offset_3 + stop_4; - !LoopCondition(offset_4, end_4, params_copy.strides[4]); - offset_4 += params_copy.strides[4]) { - writer->Write(offset_4); - } - } - } - } - } -} - -template -inline void StridedSlice(const tflite::StridedSliceParams& op_params, - const RuntimeShape& unextended_input_shape, - const T* input_data, - const RuntimeShape& unextended_output_shape, - T* output_data) { - SequentialTensorWriter writer(input_data, output_data); - StridedSlice(op_params, unextended_input_shape, unextended_output_shape, - &writer); -} - -template -inline void StridedSlice(const tflite::StridedSliceParams& op_params, - const RuntimeShape& unextended_input_shape, - const TfLiteTensor* input, - const RuntimeShape& unextended_output_shape, - TfLiteTensor* output) { - SequentialTensorWriter writer(input, output); - StridedSlice(op_params, unextended_input_shape, unextended_output_shape, - &writer); -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_STRIDED_SLICE_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/sub.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/sub.h deleted file mode 100644 index 5f4fd19e..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/sub.h +++ /dev/null @@ -1,512 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
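Each of the five nested loops above advances a pre-scaled flat offset and stops once the index passes its stop value in the direction of the stride, as strided_slice::LoopCondition encodes. A 1-D sketch of that loop shape, assuming start/stop have already been resolved the way StartForAxis/StopForAxis resolve them (illustrative name):

#include <vector>

std::vector<int> StridedSlice1D(const std::vector<int>& input,
                                int start, int stop, int stride) {
  std::vector<int> out;
  auto done = [stride](int index, int end) {
    return stride > 0 ? index >= end : index <= end;
  };
  for (int i = start; !done(i, stop); i += stride) {
    out.push_back(input[i]);
  }
  return out;
}
// Example: StridedSlice1D({0, 1, 2, 3, 4}, 3, -1, -1) yields {3, 2, 1, 0}.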
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SUB_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SUB_H_ - -#include - -#include -#include - -#include "ruy/profiler/instrumentation.h" // from @ruy -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -inline void SubNonBroadcast(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const float* input1_data, - const RuntimeShape& input2_shape, - const float* input2_data, - const RuntimeShape& output_shape, - float* output_data) { - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - output_data[i] = ActivationFunctionWithMinMax( - input1_data[i] - input2_data[i], params.float_activation_min, - params.float_activation_max); - } -} - -inline void SubNonBroadcast(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int32_t* input1_data, - const RuntimeShape& input2_shape, - const int32_t* input2_data, - const RuntimeShape& output_shape, - int32_t* output_data) { - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - output_data[i] = ActivationFunctionWithMinMax( - input1_data[i] - input2_data[i], params.quantized_activation_min, - params.quantized_activation_max); - } -} - -// TODO(b/151345304): We can implement BroadcastSub on buffers of arbitrary -// dimensionality if the runtime code does a single loop over one dimension -// that handles broadcasting as the base case. The code generator would then -// generate max(D1, D2) nested for loops. -template -inline void BroadcastSubSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const float* input1_data, - const RuntimeShape& input2_shape, - const float* input2_data, - const RuntimeShape& output_shape, - float* output_data) { - ruy::profiler::ScopeLabel label("BroadcastSubSlow/float"); - TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N); - NdArrayDesc desc1; - NdArrayDesc desc2; - NdArrayDesc output_desc; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. 
- // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. - auto sub_func = [&](int indexes[N]) { - output_data[SubscriptToIndex(output_desc, indexes)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, indexes)] - - input2_data[SubscriptToIndex(desc2, indexes)], - params.float_activation_min, params.float_activation_max); - }; - NDOpsHelper(output_desc, sub_func); -} - -template -inline void BroadcastSubSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const uint8_t* input1_data, - const RuntimeShape& input2_shape, - const uint8_t* input2_data, - const RuntimeShape& output_shape, - uint8_t* output_data) { - ruy::profiler::ScopeLabel label("BroadcastSubSlow/uint8_t"); - TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N); - NdArrayDesc desc1; - NdArrayDesc desc2; - NdArrayDesc output_desc; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. 
- auto sub_func = [&](int indexes[N]) { - const int32_t input1_val = - params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)]; - const int32_t input2_val = - params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)]; - const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, params.input2_shift); - const int32_t raw_sub = scaled_input1_val - scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sub, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[SubscriptToIndex(output_desc, indexes)] = - static_cast(clamped_output); - }; - NDOpsHelper(output_desc, sub_func); -} - -template -inline void BroadcastSubSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int32_t* input1_data, - const RuntimeShape& input2_shape, - const int32_t* input2_data, - const RuntimeShape& output_shape, - int32_t* output_data) { - ruy::profiler::ScopeLabel label("BroadcastSubSlow/int32_t"); - TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N); - NdArrayDesc desc1; - NdArrayDesc desc2; - NdArrayDesc output_desc; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. 
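The quantized sub_func above brings both operands onto a common scale with per-input multipliers, subtracts, then rescales to the output quantization, all in int32. A float-domain sketch of what that fixed-point pipeline computes for int8 operands (illustrative name; the real kernel uses precomputed multiplier/shift pairs instead of float math):

#include <algorithm>
#include <cmath>
#include <cstdint>

int8_t QuantizedSubReference(int8_t qa, float scale_a, int32_t zp_a,
                             int8_t qb, float scale_b, int32_t zp_b,
                             float scale_out, int32_t zp_out) {
  const float real_a = scale_a * (qa - zp_a);  // dequantize input 1
  const float real_b = scale_b * (qb - zp_b);  // dequantize input 2
  const float real_diff = real_a - real_b;
  // Requantize the real-valued difference and clamp to the int8 range.
  int32_t q = static_cast<int32_t>(std::round(real_diff / scale_out)) + zp_out;
  q = std::min<int32_t>(q, 127);
  q = std::max<int32_t>(q, -128);
  return static_cast<int8_t>(q);
}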
- auto sub_func = [&](int indexes[N]) { - output_data[SubscriptToIndex(output_desc, indexes)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, indexes)] - - input2_data[SubscriptToIndex(desc2, indexes)], - params.quantized_activation_min, params.quantized_activation_max); - }; - NDOpsHelper(output_desc, sub_func); -} - -template -inline void BroadcastSubSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int8_t* input1_data, - const RuntimeShape& input2_shape, - const int8_t* input2_data, - const RuntimeShape& output_shape, - int8_t* output_data) { - ruy::profiler::ScopeLabel label("BroadcastSubSlow/int8_t"); - NdArrayDesc desc1; - NdArrayDesc desc2; - NdArrayDesc output_desc; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. - auto sub_func = [&](int indexes[N]) { - const int32_t input1_val = - params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)]; - const int32_t input2_val = - params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)]; - const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, params.input2_shift); - const int32_t raw_sub = scaled_input1_val - scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sub, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[SubscriptToIndex(output_desc, indexes)] = - static_cast(clamped_output); - }; - NDOpsHelper(output_desc, sub_func); -} - -template -void BroadcastSubSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int64_t* input1_data, - const RuntimeShape& input2_shape, - const int64_t* input2_data, - const RuntimeShape& output_shape, int64_t* output_data) { - ruy::profiler::ScopeLabel label("BroadcastSubSlow/int64_t"); - TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N); - NdArrayDesc desc1; - NdArrayDesc desc2; - NdArrayDesc output_desc; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - 
// trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. - auto sub_func = [&](int indexes[N]) { - output_data[SubscriptToIndex(output_desc, indexes)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, indexes)] - - input2_data[SubscriptToIndex(desc2, indexes)], - params.int64_activation_min, params.int64_activation_max); - }; - NDOpsHelper(output_desc, sub_func); -} - -template -void BroadcastSubSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const T* input1_data, - const RuntimeShape& input2_shape, const T* input2_data, - const RuntimeShape& output_shape, T* output_data) { - ruy::profiler::ScopeLabel label("BroadcastSubSlow/templated"); - TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N); - NdArrayDesc desc1; - NdArrayDesc desc2; - NdArrayDesc output_desc; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. - auto sub_func = [&](int indexes[N]) { - output_data[SubscriptToIndex(output_desc, indexes)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, indexes)] - - input2_data[SubscriptToIndex(desc2, indexes)], - params.quantized_activation_min, params.quantized_activation_max); - }; - NDOpsHelper(output_desc, sub_func); -} - -// Element-wise Sub that can often be used for inner loop of broadcast sub as -// well as the non-broadcast sub. 
-inline void SubElementwise(int size, const ArithmeticParams& params, - const uint8_t* input1_data, - const uint8_t* input2_data, uint8_t* output_data) { - TFLITE_DCHECK_GT(params.input1_offset, -256); - TFLITE_DCHECK_GT(params.input2_offset, -256); - TFLITE_DCHECK_LT(params.input1_offset, 256); - TFLITE_DCHECK_LT(params.input2_offset, 256); - - for (int i = 0; i < size; ++i) { - const int32_t input1_val = params.input1_offset + input1_data[i]; - const int32_t input2_val = params.input2_offset + input2_data[i]; - const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, params.input2_shift); - const int32_t raw_sub = scaled_input1_val - scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sub, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[i] = static_cast(clamped_output); - } -} - -// Element-wise add that can often be used for inner loop of broadcast add as -// well as the non-broadcast add. -inline void SubElementwise(int size, const ArithmeticParams& params, - const int8_t* input1_data, const int8_t* input2_data, - int8_t* output_data) { - const int32_t int8_max_value = std::numeric_limits::max(); - TFLITE_DCHECK_GE(params.input1_offset, -1 * int8_max_value); - TFLITE_DCHECK_GE(params.input2_offset, -1 * int8_max_value); - TFLITE_DCHECK_LE(params.input1_offset, int8_max_value); - TFLITE_DCHECK_LE(params.input2_offset, int8_max_value); - - for (int i = 0; i < size; ++i) { - const int32_t input1_val = params.input1_offset + input1_data[i]; - const int32_t input2_val = params.input2_offset + input2_data[i]; - const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, params.input2_shift); - const int32_t raw_sub = scaled_input1_val - scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sub, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[i] = static_cast(clamped_output); - } -} - -inline void Sub(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const uint8_t* input1_data, - const RuntimeShape& input2_shape, const uint8_t* input2_data, - const RuntimeShape& output_shape, uint8_t* output_data) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - - TFLITE_DCHECK_GT(params.input1_offset, -256); - TFLITE_DCHECK_GT(params.input2_offset, -256); - 
TFLITE_DCHECK_LT(params.input1_offset, 256); - TFLITE_DCHECK_LT(params.input2_offset, 256); - SubElementwise(flat_size, params, input1_data, input2_data, output_data); -} - -inline void Sub(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const int8_t* input1_data, - const RuntimeShape& input2_shape, const int8_t* input2_data, - const RuntimeShape& output_shape, int8_t* output_data) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - - const int32_t int8_max_value = std::numeric_limits::max(); - TFLITE_DCHECK_GE(params.input1_offset, -1 * int8_max_value); - TFLITE_DCHECK_GE(params.input2_offset, -1 * int8_max_value); - TFLITE_DCHECK_LE(params.input1_offset, int8_max_value); - TFLITE_DCHECK_LE(params.input2_offset, int8_max_value); - SubElementwise(flat_size, params, input1_data, input2_data, output_data); -} - -template -void Sub(const ArithmeticParams& params, const RuntimeShape& input1_shape, - const T* input1_data, const RuntimeShape& input2_shape, - const T* input2_data, const RuntimeShape& output_shape, - T* output_data) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. 
- for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - output_data[Offset(extended_output_shape, b, y, x, c)] = - input1_data[SubscriptToIndex(desc1, b, y, x, c)] - - input2_data[SubscriptToIndex(desc2, b, y, x, c)]; - } - } - } - } -} - -inline void SetActivationMinMax(const ArithmeticParams& params, - int32_t* activation_min, - int32_t* activation_max) { - *activation_min = params.quantized_activation_min; - *activation_max = params.quantized_activation_max; -} - -inline void SetActivationMinMax(const ArithmeticParams& params, - float* activation_min, float* activation_max) { - *activation_min = params.float_activation_min; - *activation_max = params.float_activation_max; -} - -inline void SetActivationMinMax(const ArithmeticParams& params, - int64_t* activation_min, - int64_t* activation_max) { - *activation_min = params.int64_activation_min; - *activation_max = params.int64_activation_max; -} - -template -inline void SubWithActivation( - const ArithmeticParams& params, const RuntimeShape& input1_shape, - const T* input1_data, const RuntimeShape& input2_shape, - const T* input2_data, const RuntimeShape& output_shape, T* output_data) { - ruy::profiler::ScopeLabel label("SubWithActivation"); - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - T activation_min, activation_max; - SetActivationMinMax(params, &activation_min, &activation_max); - - for (int i = 0; i < flat_size; ++i) { - output_data[i] = ActivationFunctionWithMinMax( - input1_data[i] - input2_data[i], activation_min, activation_max); - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SUB_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/tanh.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/tanh.h deleted file mode 100644 index 3a05c474..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/tanh.h +++ /dev/null @@ -1,129 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
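The broadcast loops above index the two inputs through NdArrayDesc descriptors so that a size-1 dimension is re-read for every output index along that axis. A standalone sketch of that idea, assuming the usual stride-0 trick for broadcast dimensions (Desc4D, MakeBroadcastDesc, and FlatIndex are illustrative names, not TFLite API):

#include <array>

struct Desc4D {
  std::array<int, 4> extents;
  std::array<int, 4> strides;  // a broadcast (size-1) dimension gets stride 0
};

// Builds row-major strides for `shape`, zeroing the stride wherever the shape
// is broadcast (extent 1) against a larger output extent.
inline Desc4D MakeBroadcastDesc(const std::array<int, 4>& shape,
                                const std::array<int, 4>& output_shape) {
  Desc4D d{output_shape, {0, 0, 0, 0}};
  int stride = 1;
  for (int i = 3; i >= 0; --i) {
    d.strides[i] = (shape[i] == 1) ? 0 : stride;
    stride *= shape[i];
  }
  return d;
}

inline int FlatIndex(const Desc4D& d, int b, int y, int x, int c) {
  return b * d.strides[0] + y * d.strides[1] + x * d.strides[2] + c * d.strides[3];
}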
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TANH_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TANH_H_ - -#include - -#include "fixedpoint/fixedpoint.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/cppmath.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/op_macros.h" - -namespace tflite { -namespace reference_ops { - -inline void Tanh(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - - for (int i = 0; i < flat_size; i++) { - float val = input_data[i]; - float result = std::tanh(val); - output_data[i] = result; - } -} - -// Convenience version that allows, for example, generated-code calls to be -// uniform between data types. -inline void Tanh(const TanhParams&, const RuntimeShape& input_shape, - const float* input_data, const RuntimeShape& output_shape, - float* output_data) { - // Drop params: not needed. - Tanh(input_shape, input_data, output_shape, output_data); -} - -inline void Tanh(const TanhParams& params, const RuntimeShape& input_shape, - const int16_t* input_data, const RuntimeShape& output_shape, - int16_t* output_data) { - const int input_left_shift = params.input_left_shift; - // Support for shifts is limited until we have a parameterized version of - // SaturatingRoundingMultiplyByPOT(). - TFLITE_DCHECK_GE(input_left_shift, 0); - TFLITE_DCHECK_LE(input_left_shift, 1); - - const int flat_size = MatchingFlatSize(input_shape, output_shape); - - // F0 uses 0 integer bits, range [-1, 1]. - // This is the return type of math functions such as tanh, logistic, - // whose range is in [-1, 1]. - using F0 = gemmlowp::FixedPoint; - // F3 uses 3 integer bits, range [-8, 8], the input range expected here. 
- using F3 = gemmlowp::FixedPoint; - - if (input_left_shift == 0) { - for (int i = 0; i < flat_size; i++) { - F3 input = F3::FromRaw(input_data[i]); - F0 output = gemmlowp::tanh(input); - output_data[i] = output.raw(); - } - } else { - for (int i = 0; i < flat_size; i++) { - F3 input = F3::FromRaw( - gemmlowp::SaturatingRoundingMultiplyByPOT<1>(input_data[i])); - F0 output = gemmlowp::tanh(input); - output_data[i] = output.raw(); - } - } -} - -inline void Tanh(const TanhParams& params, const RuntimeShape& input_shape, - const uint8_t* input_data, const RuntimeShape& output_shape, - uint8_t* output_data) { - const int32_t input_zero_point = params.input_zero_point; - const int32_t input_range_radius = params.input_range_radius; - const int32_t input_multiplier = params.input_multiplier; - const int input_left_shift = params.input_left_shift; - const int32_t output_zero_point = 128; - const int flat_size = MatchingFlatSize(input_shape, output_shape); - - for (int i = 0; i < flat_size; i++) { - const uint8_t input_val_u8 = input_data[i]; - const int32_t input_val_centered = - static_cast(input_val_u8) - input_zero_point; - uint8_t output_val; - if (input_val_centered <= -input_range_radius) { - output_val = 0; - } else if (input_val_centered >= input_range_radius) { - output_val = 255; - } else { - const int32_t input_val_rescaled = - MultiplyByQuantizedMultiplierGreaterThanOne( - input_val_centered, input_multiplier, input_left_shift); - using FixedPoint4 = gemmlowp::FixedPoint; - using FixedPoint0 = gemmlowp::FixedPoint; - const FixedPoint4 input_val_f4 = FixedPoint4::FromRaw(input_val_rescaled); - const FixedPoint0 output_val_f0 = gemmlowp::tanh(input_val_f4); - // Convert from Q0.31 to Q24.7. - using gemmlowp::RoundingDivideByPOT; - int32_t output_val_s32 = RoundingDivideByPOT(output_val_f0.raw(), 24); - output_val_s32 += output_zero_point; - if (output_val_s32 == 256) { - output_val_s32 = 255; - } - // Reinterpret as Q0.7, encoded in uint8_t. - TFLITE_DCHECK_GE(output_val_s32, 0); - TFLITE_DCHECK_LE(output_val_s32, 255); - output_val = static_cast(output_val_s32); - } - output_data[i] = output_val; - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TANH_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/transpose_conv.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/transpose_conv.h deleted file mode 100644 index 6e9cb1f9..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/reference/transpose_conv.h +++ /dev/null @@ -1,217 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
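The uint8 Tanh above fixes the output zero point at 128, so the real-valued tanh result in [-1, 1] is encoded with a scale of 1/128 and saturated at the top (the output_val_s32 == 256 check). A float-domain sketch of that requantization (illustrative name; the kernel does the same thing in gemmlowp fixed point):

#include <algorithm>
#include <cmath>
#include <cstdint>

uint8_t TanhUint8Reference(uint8_t q_in, float input_scale, int32_t input_zero_point) {
  const float x = input_scale * (static_cast<int32_t>(q_in) - input_zero_point);
  const float y = std::tanh(x);  // in [-1, 1]
  // Map [-1, 1] onto [0, 255] around zero point 128, then saturate.
  int32_t q = static_cast<int32_t>(std::round(y * 128.0f)) + 128;
  q = std::min<int32_t>(q, 255);
  q = std::max<int32_t>(q, 0);
  return static_cast<uint8_t>(q);
}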
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_CONV_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_CONV_H_ - -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -namespace reference_ops { - -inline void TransposeConv( - const ConvParams& params, const RuntimeShape& input_shape, - const float* input_data, const RuntimeShape& filter_shape, - const float* filter_data, const RuntimeShape& bias_shape, - const float* bias_data, const RuntimeShape& output_shape, - float* output_data, const RuntimeShape& im2col_shape, float* im2col_data) { - const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - (void)im2col_data; // only used in optimized code. - (void)im2col_shape; // only used in optimized code. - - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); - const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - if (bias_data) { - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - } - - // Although transpose convolution simplifies to convolution with transposed - // weights for strides of 1, non-unitary striding complicates matters. To - // keep this reference implementation as clear as possible, we use a - // "scatter" access pattern, where we loop through all the input elements, - // computing their influence on the output, rather than looping through the - // output elements in the typical "gather" access pattern of a conv. We - // therefore must initialize the output array to zero. - const int num_elements = output_shape.FlatSize(); - for (int i = 0; i < num_elements; i++) { - output_data[i] = 0.0f; - } - - // Loop through input elements one at a time. 
- for (int batch = 0; batch < batches; ++batch) { - for (int in_y = 0; in_y < input_height; ++in_y) { - for (int in_x = 0; in_x < input_width; ++in_x) { - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - // Loop through the output elements it will influence - const int out_x_origin = (in_x * stride_width) - pad_width; - const int out_y_origin = (in_y * stride_height) - pad_height; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - for (int out_channel = 0; out_channel < output_depth; - ++out_channel) { - // Compute output element location - const int out_x = out_x_origin + filter_x; - const int out_y = out_y_origin + filter_y; - // We cannot accumulate out of bounds - if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) && - (out_y < output_height)) { - float input_value = input_data[Offset( - input_shape, batch, in_y, in_x, in_channel)]; - float filter_value = - filter_data[Offset(filter_shape, out_channel, filter_y, - filter_x, in_channel)]; - output_data[Offset(output_shape, batch, out_y, out_x, - out_channel)] += - input_value * filter_value; - } - } - } - } - } - } - } - } - if (bias_data) { - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int out_channel = 0; out_channel < output_depth; ++out_channel) { - output_data[Offset(output_shape, batch, out_y, out_x, - out_channel)] += bias_data[out_channel]; - } - } - } - } - } -} - -inline void TransposeConv( - const ConvParams& params, const RuntimeShape& input_shape, - const uint8_t* input_data, const RuntimeShape& filter_shape, - const uint8_t* filter_data, const RuntimeShape& bias_shape, - const int32_t* bias_data, const RuntimeShape& output_shape, - uint8_t* output_data, const RuntimeShape& im2col_shape, - uint8_t* im2col_data, int32_t* scratch_buffer) { - const int stride_width = params.stride_width; - const int stride_height = params.stride_height; - const int pad_width = params.padding_values.width; - const int pad_height = params.padding_values.height; - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - (void)im2col_data; // only used in optimized code. - (void)im2col_shape; // only used in optimized code. 
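The float TransposeConv above uses a "scatter" pattern: it walks the input and accumulates each element's contribution into every output position its filter taps reach, which is why the output buffer is zeroed first. A 1-D sketch of the same pattern (illustrative name):

#include <cstddef>
#include <vector>

std::vector<float> TransposeConv1D(const std::vector<float>& input,
                                   const std::vector<float>& filter,
                                   int stride, int pad, int output_size) {
  std::vector<float> output(output_size, 0.0f);  // scatter requires zero init
  for (std::size_t in_x = 0; in_x < input.size(); ++in_x) {
    const int out_x_origin = static_cast<int>(in_x) * stride - pad;
    for (std::size_t f = 0; f < filter.size(); ++f) {
      const int out_x = out_x_origin + static_cast<int>(f);
      if (out_x >= 0 && out_x < output_size) {  // skip out-of-bounds taps
        output[out_x] += input[in_x] * filter[f];
      }
    }
  }
  return output;
}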
- - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); - const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int filter_height = filter_shape.Dims(1); - const int filter_width = filter_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int32_t input_offset = params.input_offset; - const int32_t filter_offset = params.weights_offset; - const int32_t output_offset = params.output_offset; - const int32_t output_multiplier = params.output_multiplier; - const int output_shift = params.output_shift; - const int32_t output_activation_min = params.quantized_activation_min; - const int32_t output_activation_max = params.quantized_activation_max; - TFLITE_DCHECK_LE(output_activation_min, output_activation_max); - if (bias_data) { - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - } - - const int num_elements = output_shape.FlatSize(); - // We need to initialize scratch_buffer to all 0s, as we apply the same - // 'scatter' based trick as in float version. - memset(scratch_buffer, 0, num_elements * sizeof(int32_t)); - - // Loop through input elements one at a time. - for (int batch = 0; batch < batches; ++batch) { - for (int in_y = 0; in_y < input_height; ++in_y) { - for (int in_x = 0; in_x < input_width; ++in_x) { - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - // Loop through the output elements it will influence. - const int out_x_origin = (in_x * stride_width) - pad_width; - const int out_y_origin = (in_y * stride_height) - pad_height; - for (int filter_y = 0; filter_y < filter_height; ++filter_y) { - for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - for (int out_channel = 0; out_channel < output_depth; - ++out_channel) { - // Compute output element location. - const int out_x = out_x_origin + filter_x; - const int out_y = out_y_origin + filter_y; - // We cannot accumulate out of bounds. 
- if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) && - (out_y < output_height)) { - uint8_t input_value = input_data[Offset( - input_shape, batch, in_y, in_x, in_channel)]; - uint8_t filter_value = - filter_data[Offset(filter_shape, out_channel, filter_y, - filter_x, in_channel)]; - scratch_buffer[Offset(output_shape, batch, out_y, out_x, - out_channel)] += - (input_value + input_offset) * - (filter_value + filter_offset); - } - } - } - } - } - } - } - } - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int out_channel = 0; out_channel < output_depth; ++out_channel) { - int32_t acc = scratch_buffer[Offset(output_shape, batch, out_y, out_x, - out_channel)]; - if (bias_data) { - acc += bias_data[out_channel]; - } - int32_t scaled_acc = MultiplyByQuantizedMultiplier( - acc, output_multiplier, output_shift); - scaled_acc += output_offset; - scaled_acc = std::max(scaled_acc, output_activation_min); - scaled_acc = std::min(scaled_acc, output_activation_max); - output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = - static_cast(scaled_acc); - } - } - } - } -} - -} // namespace reference_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_CONV_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/strided_slice_logic.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/strided_slice_logic.h deleted file mode 100644 index bfe84050..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/strided_slice_logic.h +++ /dev/null @@ -1,211 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_STRIDED_SLICE_LOGIC_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_STRIDED_SLICE_LOGIC_H_ - -#include -#include - -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { -namespace strided_slice { - -// Use until std::clamp() is available from C++17. -inline int Clamp(const int v, const int lo, const int hi) { - TFLITE_DCHECK(!(hi < lo)); - if (hi < v) return hi; - if (v < lo) return lo; - return v; -} - -inline void StridedSlicePadIndices(tflite::StridedSliceParams* p, - int dim_count) { - // Add indices and mask bits to fully include extra dimensions - TFLITE_CHECK_LE(dim_count, 5); - TFLITE_CHECK_GE(dim_count, p->start_indices_count); - TFLITE_CHECK_EQ(p->start_indices_count, p->stop_indices_count); - TFLITE_CHECK_EQ(p->stop_indices_count, p->strides_count); - - const int pad_count = dim_count - p->start_indices_count; - - // Pad indices at start, so move arrays by pad_count. 
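The quantized variant uses the same scatter trick, but accumulates into the 32-bit scratch_buffer and requantizes in a second pass. A condensed sketch of that second pass for a single accumulator, assuming the same MultiplyByQuantizedMultiplier() helper the deleted code calls (declared in the kernels' internal common header):

#include <algorithm>
#include <cstdint>

// Requantize one int32 accumulator into a uint8 output value, mirroring the
// tail of the quantized TransposeConv above.
inline uint8_t RequantizeAcc(int32_t acc, int32_t bias,
                             int32_t output_multiplier, int output_shift,
                             int32_t output_offset,
                             int32_t act_min, int32_t act_max) {
  acc += bias;                                               // per-channel bias
  int32_t scaled =
      MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift);
  scaled += output_offset;                                   // re-center on the uint8 zero point
  scaled = std::max(scaled, act_min);                        // fused activation clamp
  scaled = std::min(scaled, act_max);
  return static_cast<uint8_t>(scaled);
}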
- for (int i = p->start_indices_count - 1; i >= 0; --i) { - p->strides[i + pad_count] = p->strides[i]; - p->start_indices[i + pad_count] = p->start_indices[i]; - p->stop_indices[i + pad_count] = p->stop_indices[i]; - } - for (int i = 0; i < pad_count; ++i) { - p->start_indices[i] = 0; - p->stop_indices[i] = 1; - p->strides[i] = 1; - } - - // Pad masks with 0s or 1s as required. - p->shrink_axis_mask <<= pad_count; - p->ellipsis_mask <<= pad_count; - p->new_axis_mask <<= pad_count; - p->begin_mask <<= pad_count; - p->end_mask <<= pad_count; - p->begin_mask |= (1 << pad_count) - 1; - p->end_mask |= (1 << pad_count) - 1; - - p->start_indices_count = dim_count; - p->stop_indices_count = dim_count; - p->strides_count = dim_count; -} - -// Return the index for the first element along that axis. This index will be a -// positive integer between [0, axis_size] (or [-1, axis_size -1] if stride < 0) -// that can be used to index directly into the data. -inline int StartForAxis(const tflite::StridedSliceParams& params, - const RuntimeShape& input_shape, int axis) { - const auto begin_mask = params.begin_mask; - const auto* start_indices = params.start_indices; - const auto* strides = params.strides; - const int axis_size = input_shape.Dims(axis); - if (axis_size == 0) { - return 0; - } - // Begin with the specified index. - int start = start_indices[axis]; - - // begin_mask override - if (begin_mask & 1 << axis) { - if (strides[axis] > 0) { - // Forward iteration - use the first element. These values will get - // clamped below (Note: We could have set them to 0 and axis_size-1, but - // use lowest() and max() to maintain symmetry with StopForAxis()) - start = std::numeric_limits::lowest(); - } else { - // Backward iteration - use the last element. - start = std::numeric_limits::max(); - } - } - - // Handle negative indices - if (start < 0) { - start += axis_size; - } - - // Clamping - if (strides[axis] > 0) { - // Forward iteration - start = Clamp(start, 0, axis_size); - } else { - // Backward iteration - start = Clamp(start, -1, axis_size - 1); - } - - return start; -} - -// Return the "real" index for the end of iteration along that axis. This is an -// "end" in the traditional C sense, in that it points to one past the last -// element. ie. So if you were iterating through all elements of a 1D array of -// size 4, this function would return 4 as the stop, because it is one past the -// "real" indices of 0, 1, 2 & 3. -inline int StopForAxis(const tflite::StridedSliceParams& params, - const RuntimeShape& input_shape, int axis, - int start_for_axis) { - const auto end_mask = params.end_mask; - const auto shrink_axis_mask = params.shrink_axis_mask; - const auto* stop_indices = params.stop_indices; - const auto* strides = params.strides; - const int axis_size = input_shape.Dims(axis); - if (axis_size == 0) { - return 0; - } - - // Begin with the specified index - const bool shrink_axis = shrink_axis_mask & (1 << axis); - int stop = stop_indices[axis]; - - // When shrinking an axis, the end position does not matter (and can be - // incorrect when negative indexing is used, see Issue #19260). Always use - // start_for_axis + 1 to generate a length 1 slice, since start_for_axis has - // already been adjusted for negative indices. - if (shrink_axis) { - return start_for_axis + 1; - } - - // end_mask override - if (end_mask & (1 << axis)) { - if (strides[axis] > 0) { - // Forward iteration - use the last element. 
These values will get - // clamped below - stop = std::numeric_limits::max(); - } else { - // Backward iteration - use the first element. - stop = std::numeric_limits::lowest(); - } - } - - // Handle negative indices - if (stop < 0) { - stop += axis_size; - } - - // Clamping - // Because the end index points one past the last element, we need slightly - // different clamping ranges depending on the direction. - if (strides[axis] > 0) { - // Forward iteration - stop = Clamp(stop, 0, axis_size); - } else { - // Backward iteration - stop = Clamp(stop, -1, axis_size - 1); - } - - return stop; -} - -inline bool LoopCondition(int index, int stop, int stride) { - // True when we have reached the end of an axis and should loop. - return stride > 0 ? index >= stop : index <= stop; -} - -inline tflite::StridedSliceParams BuildStridedSliceParams( - int begin_mask, int end_mask, int shrink_axis_mask, - const std::vector& start_indices, const std::vector& stop_indices, - const std::vector& strides) { - tflite::StridedSliceParams op_params; - const int dims_count = start_indices.size(); - - op_params.start_indices_count = dims_count; - op_params.stop_indices_count = dims_count; - op_params.strides_count = dims_count; - for (int i = 0; i < dims_count; ++i) { - op_params.start_indices[i] = start_indices[i]; - op_params.stop_indices[i] = stop_indices[i]; - op_params.strides[i] = strides[i]; - } - - op_params.begin_mask = begin_mask; - op_params.ellipsis_mask = 0; - op_params.end_mask = end_mask; - op_params.new_axis_mask = 0; - op_params.shrink_axis_mask = shrink_axis_mask; - - return op_params; -} - -} // namespace strided_slice - -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_STRIDED_SLICE_LOGIC_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/tensor_ctypes.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/tensor_ctypes.h deleted file mode 100644 index f1d3e17f..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/tensor_ctypes.h +++ /dev/null @@ -1,47 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -template -inline T* GetTensorData(TfLiteTensor* tensor) { - return tensor != nullptr ? reinterpret_cast(tensor->data.raw) : nullptr; -} - -template -inline const T* GetTensorData(const TfLiteTensor* tensor) { - return tensor != nullptr ? 
reinterpret_cast(tensor->data.raw) - : nullptr; -} - -inline RuntimeShape GetTensorShape(const TfLiteTensor* tensor) { - if (tensor == nullptr) { - return RuntimeShape(); - } - - TfLiteIntArray* dims = tensor->dims; - const int dims_size = dims->size; - const int32_t* dims_data = reinterpret_cast(dims->data); - return RuntimeShape(dims_size, dims_data); -} - -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/types.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/types.h deleted file mode 100644 index 1703589c..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/internal/types.h +++ /dev/null @@ -1,1169 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TYPES_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_TYPES_H_ - -#include -#include -#include -#include - -#include "tensorflow/lite/kernels/internal/compatibility.h" - -namespace tflite { - -enum class FusedActivationFunctionType : uint8_t { - kNone, - kRelu6, - kRelu1, - kRelu -}; -enum class PaddingType : uint8_t { kNone, kSame, kValid }; - -struct PaddingValues { - int16_t width; - int16_t height; - // offset is used for calculating "remaining" padding, for example, `width` - // is 1 and `width_offset` is 1, so padding_left is 1 while padding_right is - // 1 + 1 = 2. - int16_t width_offset; - // Same as width_offset except it's over the height dimension. - int16_t height_offset; -}; - -// This enumeration allows for non-default formats for the weights array -// of a fully-connected operator, allowing the use of special optimized -// runtime paths. -enum class FullyConnectedWeightsFormat : uint8_t { - // Default format (flat 2D layout, the inner contiguous dimension - // is input_depth, the outer non-contiguous dimension is output_depth) - kDefault, - // Summary: optimized layout for fast CPU runtime implementation, - // aimed specifically at ARM CPUs at the moment, and specialized for - // 8-bit quantized layers. - // - // The use case we're concerned with here is: 8-bit quantization, - // large weights matrix that doesn't fit in cache (e.g. 4096x2048 in - // a key application that drove this), very small batch size (e.g. 1 -- 4). - // - // Even with 8-bit quantization of weights, the performance of memory - // accesses to the weights can become the dominant issue when - // the batch size is small, so each weight value is used in only a few - // arithmetic ops, i.e. the fully-connected node has a low arithmetic - // intensity. The specific issues that arise are of three kinds: - // (1) One may, ideally, max out DRAM bandwidth, i.e. be truly memory - // bound. That's the "good" issue to run into. 
- // (2) One may run into sub-optimal pre-fetching: the data hasn't been - // prefetched into the cache by the time we need it. - // (3) One may run into cache aliasing: multiple values that are - // pre-fetched, alias each other in the L1 cache (which typically - // has only 4-way set associativity in ARM CPUs) and thus evict - // each other before we get to using them. - // - // The point of this shuffling is to avoid issues (2) and (3) so that - // we get as fast as possible given only the hard constraint (1). - // This is achieved by turning the difficulty into a solution: the - // difficulty, that each value loaded from memory is used only in - // one kernel iteration, making this operation memory-intensive, hints at - // the solution, of shuffling the weights so that they are stored in the - // exact order as the kernel needs to load them, so that the memory - // accesses made by the kernel are trivial. This solves (2) because the - // trivial memory access pattern allows the CPU's automatic prefetching - // to perform very well (no need even for preload instructions), and this - // solves (3) because the values being loaded concurrently are now - // contiguous in the address space, thus don't alias each other in the cache. - // - // On ARM, we typically want our kernel to process a 4x16 block of weights - // at a time, because: - // - 16 is the number of bytes in a NEON register. - // - 4 is how many rows we need to handle concurrently in the kernel in - // order to have sufficient mutual independence of instructions to - // maximize arithmetic throughput. - // - // Finally, the 'Int8' part in the name refers to the fact that this - // weights format has each weights value encoded as a signed int8_t value, - // even if the data type of the weights buffer is uint8_t. This is intended - // to save runtime kernels the effort to have to XOR the top bit of these - // bytes before using them in signed arithmetic, see this file for more - // explanations on the 'signed int8_t trick' in matrix multiplication kernels: - // - // tensorflow/lite/toco/graph_transformations/ensure_uint8_weights_safe_for_fast_int8_kernels.cc - // - kShuffled4x16Int8, -}; - -// Quantization parameters, determining the mapping of quantized values -// to real values (i.e. determining how quantized values are mathematically -// interpreted). -// -// The correspondence is as follows: -// -// real_value = scale * (quantized_value - zero_point); -// -// In other words, zero_point designates which quantized value corresponds to -// the real 0 value, and scale designates the difference between the real values -// corresponding to consecutive quantized values differing by 1. -struct QuantizationParams { - int32_t zero_point = 0; - double scale = 0.0; -}; - -inline bool operator==(const QuantizationParams& qp1, - const QuantizationParams& qp2) { - return qp1.zero_point == qp2.zero_point && qp1.scale == qp2.scale; -} - -template -struct Dims { - int sizes[N]; - int strides[N]; -}; - -class RuntimeShape { - public: - // Shapes with dimensions up to 5 are stored directly in the structure, while - // larger shapes are separately allocated. 
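The QuantizationParams comment above pins down the affine mapping real_value = scale * (quantized_value - zero_point). A small sketch of both directions of that mapping, with a clamp to the uint8 range added for illustration:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Dequantize: recover the real value represented by a quantized uint8.
inline float Dequantize(uint8_t q, float scale, int32_t zero_point) {
  return scale * (static_cast<int32_t>(q) - zero_point);
}

// Quantize: round to the nearest representable value and clamp to [0, 255].
inline uint8_t Quantize(float real, float scale, int32_t zero_point) {
  const int32_t q =
      zero_point + static_cast<int32_t>(std::round(real / scale));
  return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, q)));
}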
- static constexpr int kMaxSmallSize = 5; - - RuntimeShape& operator=(RuntimeShape const&) = delete; - - RuntimeShape() : size_(0) {} - - explicit RuntimeShape(int dimensions_count) : size_(dimensions_count) { - if (dimensions_count > kMaxSmallSize) { -#ifdef TF_LITE_STATIC_MEMORY - TFLITE_CHECK(false && "No shape resizing supported on this platform"); -#else // TF_LITE_STATIC_MEMORY - dims_pointer_ = new int32_t[dimensions_count]; -#endif // TF_LITE_STATIC_MEMORY - } - } - - RuntimeShape(int shape_size, int32_t value) : size_(0) { - Resize(shape_size); - for (int i = 0; i < shape_size; ++i) { - SetDim(i, value); - } - } - - RuntimeShape(int dimensions_count, const int32_t* dims_data) : size_(0) { - ReplaceWith(dimensions_count, dims_data); - } - - RuntimeShape(const std::initializer_list init_list) : size_(0) { - BuildFrom(init_list); - } - - // Avoid using this constructor. We should be able to delete it when C++17 - // rolls out. - RuntimeShape(RuntimeShape const& other) : size_(other.DimensionsCount()) { - if (size_ > kMaxSmallSize) { - dims_pointer_ = new int32_t[size_]; - } - std::memcpy(DimsData(), other.DimsData(), sizeof(int32_t) * size_); - } - - bool operator==(const RuntimeShape& comp) const { - return this->size_ == comp.size_ && - std::memcmp(DimsData(), comp.DimsData(), size_ * sizeof(int32_t)) == - 0; - } - - ~RuntimeShape() { - if (size_ > kMaxSmallSize) { -#ifdef TF_LITE_STATIC_MEMORY - TFLITE_CHECK(false && "No shape resizing supported on this platform"); -#else // TF_LITE_STATIC_MEMORY - delete[] dims_pointer_; -#endif // TF_LITE_STATIC_MEMORY - } - } - - inline int32_t DimensionsCount() const { return size_; } - inline int32_t Dims(int i) const { - TFLITE_DCHECK_GE(i, 0); - TFLITE_DCHECK_LT(i, size_); - return size_ > kMaxSmallSize ? dims_pointer_[i] : dims_[i]; - } - inline void SetDim(int i, int32_t val) { - TFLITE_DCHECK_GE(i, 0); - TFLITE_DCHECK_LT(i, size_); - if (size_ > kMaxSmallSize) { - dims_pointer_[i] = val; - } else { - dims_[i] = val; - } - } - - inline int32_t* DimsData() { - return size_ > kMaxSmallSize ? dims_pointer_ : dims_; - } - inline const int32_t* DimsData() const { - return size_ > kMaxSmallSize ? dims_pointer_ : dims_; - } - // The caller must ensure that the shape is no bigger than 5-D. - inline const int32_t* DimsDataUpTo5D() const { return dims_; } - - inline void Resize(int dimensions_count) { - if (size_ > kMaxSmallSize) { -#ifdef TF_LITE_STATIC_MEMORY - TFLITE_CHECK(false && "No shape resizing supported on this platform"); -#else // TF_LITE_STATIC_MEMORY - delete[] dims_pointer_; -#endif // TF_LITE_STATIC_MEMORY - } - size_ = dimensions_count; - if (dimensions_count > kMaxSmallSize) { -#ifdef TF_LITE_STATIC_MEMORY - TFLITE_CHECK(false && "No shape resizing supported on this platform"); -#else // TF_LITE_STATIC_MEMORY - dims_pointer_ = new int32_t[dimensions_count]; -#endif // TF_LITE_STATIC_MEMORY - } - } - - inline void ReplaceWith(int dimensions_count, const int32_t* dims_data) { - Resize(dimensions_count); - int32_t* dst_dims = DimsData(); - std::memcpy(dst_dims, dims_data, dimensions_count * sizeof(int32_t)); - } - - template - inline void BuildFrom(const T& src_iterable) { - const int dimensions_count = - std::distance(src_iterable.begin(), src_iterable.end()); - Resize(dimensions_count); - int32_t* data = DimsData(); - for (auto it : src_iterable) { - *data = it; - ++data; - } - } - - // This will probably be factored out. Old code made substantial use of 4-D - // shapes, and so this function is used to extend smaller shapes. 
Note that - // (a) as Dims<4>-dependent code is eliminated, the reliance on this should be - // reduced, and (b) some kernels are stricly 4-D, but then the shapes of their - // inputs should already be 4-D, so this function should not be needed. - inline static RuntimeShape ExtendedShape(int new_shape_size, - const RuntimeShape& shape) { - return RuntimeShape(new_shape_size, shape, 1); - } - - inline void BuildFrom(const std::initializer_list init_list) { - BuildFrom>(init_list); - } - - // Returns the total count of elements, that is the size when flattened into a - // vector. - inline int FlatSize() const { - int buffer_size = 1; - const int* dims_data = reinterpret_cast(DimsData()); - for (int i = 0; i < size_; i++) { - buffer_size *= dims_data[i]; - } - return buffer_size; - } - - bool operator!=(const RuntimeShape& comp) const { return !((*this) == comp); } - - private: - // For use only by ExtendedShape(), written to guarantee (return-value) copy - // elision in C++17. - // This creates a shape padded to the desired size with the specified value. - RuntimeShape(int new_shape_size, const RuntimeShape& shape, int pad_value) - : size_(0) { - // If the following check fails, it is likely because a 4D-only kernel is - // being used with an array of larger dimension count. - TFLITE_CHECK_GE(new_shape_size, shape.DimensionsCount()); - Resize(new_shape_size); - const int size_increase = new_shape_size - shape.DimensionsCount(); - for (int i = 0; i < size_increase; ++i) { - SetDim(i, pad_value); - } - std::memcpy(DimsData() + size_increase, shape.DimsData(), - sizeof(int32_t) * shape.DimensionsCount()); - } - - int32_t size_; - union { - int32_t dims_[kMaxSmallSize]; - int32_t* dims_pointer_; - }; -}; - -// Converts inference-style shape to legacy tflite::Dims<4>. -inline tflite::Dims<4> ToRuntimeDims(const tflite::RuntimeShape& array_shape) { - tflite::Dims<4> result; - const int dimensions_count = array_shape.DimensionsCount(); - TFLITE_CHECK_LE(dimensions_count, 4); - int cum_prod = 1; - for (int i = 0; i < 4; i++) { - const int new_dim = - (i < dimensions_count) ? array_shape.Dims(dimensions_count - 1 - i) : 1; - result.sizes[i] = new_dim; - result.strides[i] = cum_prod; - cum_prod *= new_dim; - } - return result; -} - -// TODO(b/80418076): Move to legacy ops file, update invocations. -inline RuntimeShape DimsToShape(const tflite::Dims<4>& dims) { - return RuntimeShape( - {dims.sizes[3], dims.sizes[2], dims.sizes[1], dims.sizes[0]}); -} - -// Gets next index to iterate through a multidimensional array. -inline bool NextIndex(const int num_dims, const int* dims, int* current) { - if (num_dims == 0) { - return false; - } - TFLITE_DCHECK(dims != nullptr); - TFLITE_DCHECK(current != nullptr); - int carry = 1; - for (int idx = num_dims - 1; idx >= 0; --idx) { - int current_val = current[idx] + carry; - TFLITE_DCHECK_GE(dims[idx], current_val); - if (dims[idx] == current_val) { - current[idx] = 0; - } else { - current[idx] = current_val; - carry = 0; - break; - } - } - return (carry == 0); -} - -// Gets offset of index if reducing on axis. When reducing, the flattened offset -// will not change, if the input index changes on the given axis. For example, -// if you have a 3D tensor and you are reducing to 2D by eliminating axis 0, -// then index (0, 1, 2) and index (1, 1, 2) will map to the same flattened -// offset. -// TODO(kanlig): uses Dims to represent dimensions. 
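NextIndex() above advances a multidimensional index like an odometer and returns false once every element has been visited. A short usage sketch that walks all elements of a 3-D volume, assuming the helper as declared in this header:

#include "tensorflow/lite/kernels/internal/types.h"

// Visit every index of a {2, 3, 4} volume in row-major order.
int VisitAll() {
  const int dims[3] = {2, 3, 4};
  int index[3] = {0, 0, 0};
  int visited = 0;
  do {
    ++visited;  // index currently holds a valid (i0, i1, i2) coordinate
  } while (tflite::NextIndex(3, dims, index));
  return visited;  // 24 == 2 * 3 * 4
}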
-inline size_t ReducedOutputOffset(const int num_dims, const int* dims, - const int* index, const int num_axis, - const int* axis) { - if (num_dims == 0) { - return 0; - } - TFLITE_DCHECK(dims != nullptr); - TFLITE_DCHECK(index != nullptr); - size_t offset = 0; - for (int idx = 0; idx < num_dims; ++idx) { - // if we need to skip this axis - bool is_axis = false; - if (axis != nullptr) { - for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) { - if (idx == axis[axis_idx]) { - is_axis = true; - break; - } - } - } - if (!is_axis) { - offset = offset * static_cast(dims[idx]) + - static_cast(index[idx]); - } - } - return offset; -} - -inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3) { - TFLITE_DCHECK_EQ(shape.DimensionsCount(), 4); - const int* dims_data = reinterpret_cast(shape.DimsDataUpTo5D()); - TFLITE_DCHECK(i0 >= 0 && i0 < dims_data[0]); - TFLITE_DCHECK(i1 >= 0 && i1 < dims_data[1]); - TFLITE_DCHECK(i2 >= 0 && i2 < dims_data[2]); - TFLITE_DCHECK(i3 >= 0 && i3 < dims_data[3]); - return ((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3; -} - -inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3, - int i4) { - TFLITE_DCHECK_EQ(shape.DimensionsCount(), 5); - const int* dims_data = reinterpret_cast(shape.DimsDataUpTo5D()); - TFLITE_DCHECK(i0 >= 0 && i0 < dims_data[0]); - TFLITE_DCHECK(i1 >= 0 && i1 < dims_data[1]); - TFLITE_DCHECK(i2 >= 0 && i2 < dims_data[2]); - TFLITE_DCHECK(i3 >= 0 && i3 < dims_data[3]); - TFLITE_DCHECK(i4 >= 0 && i4 < dims_data[4]); - return (((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3) * - dims_data[4] + - i4; -} - -inline int Offset(const Dims<4>& dims, int i0, int i1, int i2, int i3) { - TFLITE_DCHECK(i0 >= 0 && i0 < dims.sizes[0]); - TFLITE_DCHECK(i1 >= 0 && i1 < dims.sizes[1]); - TFLITE_DCHECK(i2 >= 0 && i2 < dims.sizes[2]); - TFLITE_DCHECK(i3 >= 0 && i3 < dims.sizes[3]); - return i0 * dims.strides[0] + i1 * dims.strides[1] + i2 * dims.strides[2] + - i3 * dims.strides[3]; -} - -inline int Offset(const Dims<4>& dims, int* index) { - return Offset(dims, index[0], index[1], index[2], index[3]); -} - -inline int Offset(const RuntimeShape& shape, int* index) { - return Offset(shape, index[0], index[1], index[2], index[3]); -} - -// Get array size, DCHECKing that the dim index is in range. -// -// Note that this will be phased out with Dims<4>, since RuntimeShape::Dims() -// already performs this check. -template -int ArraySize(const Dims& array, int index) { - TFLITE_DCHECK(index >= 0 && index < N); - return array.sizes[index]; -} - -// Get common array size, DCHECKing that they all agree. -template -int MatchingArraySize(const ArrayType1& array1, int index1, - const ArrayType2& array2, int index2) { - TFLITE_DCHECK_EQ(ArraySize(array1, index1), ArraySize(array2, index2)); - return ArraySize(array1, index1); -} - -template -int MatchingArraySize(const ArrayType1& array1, int index1, - const ArrayType2& array2, int index2, Args... args) { - TFLITE_DCHECK_EQ(ArraySize(array1, index1), ArraySize(array2, index2)); - return MatchingArraySize(array1, index1, args...); -} - -// Get common shape dim, DCHECKing that they all agree. -inline int MatchingDim(const RuntimeShape& shape1, int index1, - const RuntimeShape& shape2, int index2) { - TFLITE_DCHECK_EQ(shape1.Dims(index1), shape2.Dims(index2)); - return std::min(shape1.Dims(index1), shape2.Dims(index2)); -} - -template -int MatchingDim(const RuntimeShape& shape1, int index1, - const RuntimeShape& shape2, int index2, Args... 
args) { - TFLITE_DCHECK_EQ(shape1.Dims(index1), shape2.Dims(index2)); - return MatchingDim(shape1, index1, args...); -} - -// Will be phased out with Dims<4>, replaced by RuntimeShape::FlatSize(). -template -inline int FlatSize(const Dims& dims) { - int flat_size = 1; - for (int i = 0; i < N; ++i) { - flat_size *= dims.sizes[i]; - } - return flat_size; -} - -TFLITE_DEPRECATED("Prefer FlatSize.") -inline int RequiredBufferSizeForDims(const Dims<4>& dims) { - return FlatSize(dims); -} - -inline int MatchingElementsSize(const RuntimeShape& shape, - const RuntimeShape& check_shape_0) { - const int size_1 = shape.FlatSize(); - const int size_2 = check_shape_0.FlatSize(); - TFLITE_CHECK_EQ(size_1, size_2); - return size_1; -} - -inline int MatchingElementsSize(const RuntimeShape& shape, - const RuntimeShape& check_shape_0, - const RuntimeShape& check_shape_1) { - const int size_1 = shape.FlatSize(); - const int size_2 = check_shape_0.FlatSize(); - const int size_3 = check_shape_1.FlatSize(); - TFLITE_CHECK_EQ(size_1, size_2); - TFLITE_CHECK_EQ(size_2, size_3); - return size_1; -} - -// Flat size calculation, checking that dimensions match with one or more other -// arrays. -inline int MatchingFlatSize(const RuntimeShape& shape, - const RuntimeShape& check_shape_0) { - TFLITE_DCHECK_EQ(shape.DimensionsCount(), check_shape_0.DimensionsCount()); - const int dims_count = shape.DimensionsCount(); - for (int i = 0; i < dims_count; ++i) { - TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i)); - } - return shape.FlatSize(); -} - -inline int MatchingFlatSize(const RuntimeShape& shape, - const RuntimeShape& check_shape_0, - const RuntimeShape& check_shape_1) { - TFLITE_DCHECK_EQ(shape.DimensionsCount(), check_shape_0.DimensionsCount()); - const int dims_count = shape.DimensionsCount(); - for (int i = 0; i < dims_count; ++i) { - TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i)); - } - return MatchingFlatSize(shape, check_shape_1); -} - -inline int MatchingFlatSize(const RuntimeShape& shape, - const RuntimeShape& check_shape_0, - const RuntimeShape& check_shape_1, - const RuntimeShape& check_shape_2) { - TFLITE_DCHECK_EQ(shape.DimensionsCount(), check_shape_0.DimensionsCount()); - const int dims_count = shape.DimensionsCount(); - for (int i = 0; i < dims_count; ++i) { - TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i)); - } - return MatchingFlatSize(shape, check_shape_1, check_shape_2); -} - -inline int MatchingFlatSize(const RuntimeShape& shape, - const RuntimeShape& check_shape_0, - const RuntimeShape& check_shape_1, - const RuntimeShape& check_shape_2, - const RuntimeShape& check_shape_3) { - TFLITE_DCHECK_EQ(shape.DimensionsCount(), check_shape_0.DimensionsCount()); - const int dims_count = shape.DimensionsCount(); - for (int i = 0; i < dims_count; ++i) { - TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i)); - } - return MatchingFlatSize(shape, check_shape_1, check_shape_2, check_shape_3); -} - -// Flat size calculation, checking that dimensions match with one or more other -// arrays. 
-template -inline int MatchingFlatSize(const Dims& dims, const Dims& check_dims_0) { - for (int i = 0; i < N; ++i) { - TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i)); - } - return FlatSize(dims); -} - -template -inline int MatchingFlatSize(const Dims& dims, const Dims& check_dims_0, - const Dims& check_dims_1) { - for (int i = 0; i < N; ++i) { - TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i)); - } - return MatchingFlatSize(dims, check_dims_1); -} - -template -inline int MatchingFlatSize(const Dims& dims, const Dims& check_dims_0, - const Dims& check_dims_1, - const Dims& check_dims_2) { - for (int i = 0; i < N; ++i) { - TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i)); - } - return MatchingFlatSize(dims, check_dims_1, check_dims_2); -} - -template -inline int MatchingFlatSize(const Dims& dims, const Dims& check_dims_0, - const Dims& check_dims_1, - const Dims& check_dims_2, - const Dims& check_dims_3) { - for (int i = 0; i < N; ++i) { - TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i)); - } - return MatchingFlatSize(dims, check_dims_1, check_dims_2, check_dims_3); -} - -// Data is required to be contiguous, and so many operators can use either the -// full array flat size or the flat size with one dimension skipped (commonly -// the depth). -template -inline int FlatSizeSkipDim(const Dims& dims, int skip_dim) { - TFLITE_DCHECK(skip_dim >= 0 && skip_dim < N); - int flat_size = 1; - for (int i = 0; i < N; ++i) { - flat_size *= (i == skip_dim) ? 1 : dims.sizes[i]; - } - return flat_size; -} - -// A combination of MatchingFlatSize() and FlatSizeSkipDim(). -template -inline int MatchingFlatSizeSkipDim(const Dims& dims, int skip_dim, - const Dims& check_dims_0) { - for (int i = 0; i < N; ++i) { - if (i != skip_dim) { - TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i)); - } - } - return FlatSizeSkipDim(dims, skip_dim); -} - -template -inline int MatchingFlatSizeSkipDim(const Dims& dims, int skip_dim, - const Dims& check_dims_0, - const Dims& check_dims_1) { - for (int i = 0; i < N; ++i) { - if (i != skip_dim) { - TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i)); - } - } - return MatchingFlatSizeSkipDim(dims, skip_dim, check_dims_1); -} - -template -inline int MatchingFlatSizeSkipDim(const Dims& dims, int skip_dim, - const Dims& check_dims_0, - const Dims& check_dims_1, - const Dims& check_dims_2) { - for (int i = 0; i < N; ++i) { - if (i != skip_dim) { - TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i)); - } - } - return MatchingFlatSizeSkipDim(dims, skip_dim, check_dims_1, check_dims_2); -} - -template -inline int MatchingFlatSizeSkipDim(const Dims& dims, int skip_dim, - const Dims& check_dims_0, - const Dims& check_dims_1, - const Dims& check_dims_2, - const Dims& check_dims_3) { - for (int i = 0; i < N; ++i) { - if (i != skip_dim) { - TFLITE_DCHECK_EQ(ArraySize(dims, i), ArraySize(check_dims_0, i)); - } - } - return MatchingFlatSizeSkipDim(dims, skip_dim, check_dims_1, check_dims_2, - check_dims_3); -} - -// Data is required to be contiguous, and so many operators can use either the -// full array flat size or the flat size with one dimension skipped (commonly -// the depth). 
-inline int FlatSizeSkipDim(const RuntimeShape& shape, int skip_dim) { - const int dims_count = shape.DimensionsCount(); - TFLITE_DCHECK(skip_dim >= 0 && skip_dim < dims_count); - const auto* dims_data = shape.DimsData(); - int flat_size = 1; - for (int i = 0; i < dims_count; ++i) { - flat_size *= (i == skip_dim) ? 1 : dims_data[i]; - } - return flat_size; -} - -// A combination of MatchingFlatSize() and FlatSizeSkipDim(). -inline int MatchingFlatSizeSkipDim(const RuntimeShape& shape, int skip_dim, - const RuntimeShape& check_shape_0) { - const int dims_count = shape.DimensionsCount(); - for (int i = 0; i < dims_count; ++i) { - if (i != skip_dim) { - TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i)); - } - } - return FlatSizeSkipDim(shape, skip_dim); -} - -inline int MatchingFlatSizeSkipDim(const RuntimeShape& shape, int skip_dim, - const RuntimeShape& check_shape_0, - const RuntimeShape& check_shape_1) { - const int dims_count = shape.DimensionsCount(); - for (int i = 0; i < dims_count; ++i) { - if (i != skip_dim) { - TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i)); - } - } - return MatchingFlatSizeSkipDim(shape, skip_dim, check_shape_1); -} - -inline int MatchingFlatSizeSkipDim(const RuntimeShape& shape, int skip_dim, - const RuntimeShape& check_shape_0, - const RuntimeShape& check_shape_1, - const RuntimeShape& check_shape_2) { - const int dims_count = shape.DimensionsCount(); - for (int i = 0; i < dims_count; ++i) { - if (i != skip_dim) { - TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i)); - } - } - return MatchingFlatSizeSkipDim(shape, skip_dim, check_shape_1, check_shape_2); -} - -inline int MatchingFlatSizeSkipDim(const RuntimeShape& shape, int skip_dim, - const RuntimeShape& check_shape_0, - const RuntimeShape& check_shape_1, - const RuntimeShape& check_shape_2, - const RuntimeShape& check_shape_3) { - const int dims_count = shape.DimensionsCount(); - for (int i = 0; i < dims_count; ++i) { - if (i != skip_dim) { - TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i)); - } - } - return MatchingFlatSizeSkipDim(shape, skip_dim, check_shape_1, check_shape_2, - check_shape_3); -} - -template -bool IsPackedWithoutStrides(const Dims& dims) { - int expected_stride = 1; - for (int d = 0; d < N; d++) { - if (dims.strides[d] != expected_stride) return false; - expected_stride *= dims.sizes[d]; - } - return true; -} - -template -void ComputeStrides(Dims* dims) { - dims->strides[0] = 1; - for (int d = 1; d < N; d++) { - dims->strides[d] = dims->strides[d - 1] * dims->sizes[d - 1]; - } -} - -enum class BroadcastableOpCategory : uint8_t { - kNone, - kNonBroadcast, // Matching input shapes. - kFirstInputBroadcastsFast, // Fivefold nested loops. - kSecondInputBroadcastsFast, // Fivefold nested loops. - kGenericBroadcast, // Fall-back. -}; - -struct MinMax { - float min; - float max; -}; -static_assert(sizeof(MinMax) == 8, ""); - -struct ActivationParams { - FusedActivationFunctionType activation_type; - // uint8_t, etc, activation params. - int32_t quantized_activation_min; - int32_t quantized_activation_max; -}; - -struct ReluParams : public ActivationParams { - int32_t input_offset; - int32_t output_offset; - int32_t output_multiplier; - int output_shift; -}; - -// Styles of resizing op usages. For example, kImageStyle can be used with a Pad -// op for pattern-specific optimization. -enum class ResizingCategory : uint8_t { - kNone, - kImageStyle, // 4D, operating on inner dimensions, say {0, a, b, 0}. - kGenericResize, -}; - -// For Add, Sub, Mul ops. 
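ComputeStrides() above fills in row-major strides from the legacy Dims<4> sizes, with sizes[0] as the innermost dimension. A quick sketch of what it produces, assuming the Dims<4> helpers as declared in this header:

#include "tensorflow/lite/kernels/internal/types.h"

// For sizes {8, 10, 10, 1} (depth, width, height, batch in the legacy order),
// ComputeStrides() yields strides {1, 8, 80, 800}, after which
// IsPackedWithoutStrides() reports the layout as contiguous.
void StridesExample() {
  tflite::Dims<4> dims;
  dims.sizes[0] = 8;
  dims.sizes[1] = 10;
  dims.sizes[2] = 10;
  dims.sizes[3] = 1;
  tflite::ComputeStrides(&dims);
  // dims.strides == {1, 8, 80, 800}
}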
-struct ArithmeticParams { - // Shape dependent / common to data / op types. - BroadcastableOpCategory broadcast_category; - // uint8_t inference params. - int32_t input1_offset; - int32_t input2_offset; - int32_t output_offset; - int32_t output_multiplier; - int output_shift; - // Add / Sub, not Mul, uint8_t inference params. - int left_shift; - int32_t input1_multiplier; - int input1_shift; - int32_t input2_multiplier; - int input2_shift; - - // TODO(b/158622529): Union the following activation params. - // uint8_t, etc, activation params. - int32_t quantized_activation_min; - int32_t quantized_activation_max; - // float activation params. - float float_activation_min; - float float_activation_max; - // int64_t activation params. - int64_t int64_activation_min; - int64_t int64_activation_max; - - // Processed output dimensions. - // Let input "a" be the one that broadcasts in the faster-changing dimension. - // Then, after coalescing, for shapes {a0, a1, a2, a3, a4} and - // {b0, b1, b2, b3, b4}, - // broadcast_shape[4] = b0 = a0. - // broadcast_shape[3] = b1; a1 = 1. - // broadcast_shape[2] = b2 = a2. - // broadcast_shape[1] = a3; b3 = 1. - // broadcast_shape[0] = b4 = a4. - int broadcast_shape[5]; -}; - -struct ConcatenationParams { - int8_t axis; - const int32_t* input_zeropoint; - const float* input_scale; - uint16_t inputs_count; - int32_t output_zeropoint; - float output_scale; -}; - -struct ComparisonParams { - // uint8_t inference params. - int left_shift; - int32_t input1_offset; - int32_t input1_multiplier; - int input1_shift; - int32_t input2_offset; - int32_t input2_multiplier; - int input2_shift; - // Shape dependent / common to inference types. - bool is_broadcast; -}; - -struct ConvParams { - PaddingType padding_type; - PaddingValues padding_values; - // TODO(starka): This was just "stride", so check that width+height is OK. - int16_t stride_width; - int16_t stride_height; - int16_t dilation_width_factor; - int16_t dilation_height_factor; - // uint8_t inference params. - // TODO(b/65838351): Use smaller types if appropriate. - int32_t input_offset; - int32_t weights_offset; - int32_t output_offset; - int32_t output_multiplier; - int output_shift; - // uint8_t, etc, activation params. - int32_t quantized_activation_min; - int32_t quantized_activation_max; - // float activation params. - float float_activation_min; - float float_activation_max; -}; - -struct DepthToSpaceParams { - int32_t block_size; -}; - -struct DepthwiseParams { - PaddingType padding_type; - PaddingValues padding_values; - int16_t stride_width; - int16_t stride_height; - int16_t dilation_width_factor; - int16_t dilation_height_factor; - int16_t depth_multiplier; - // uint8_t inference params. - // TODO(b/65838351): Use smaller types if appropriate. - int32_t input_offset; - int32_t weights_offset; - int32_t output_offset; - int32_t output_multiplier; - int output_shift; - // uint8_t, etc, activation params. - int32_t quantized_activation_min; - int32_t quantized_activation_max; - // float activation params. - float float_activation_min; - float float_activation_max; - const int32_t* output_multiplier_per_channel; - const int32_t* output_shift_per_channel; -}; - -struct DequantizationParams { - double scale; - int32_t zero_point; -}; - -struct PerChannelDequantizationParams { - const float* scale; - const int32_t* zero_point; - int32_t quantized_dimension; -}; - -struct FakeQuantParams { - MinMax minmax; - int32_t num_bits; -}; - -struct FullyConnectedParams { - // uint8_t inference params. 
- // TODO(b/65838351): Use smaller types if appropriate. - int32_t input_offset; - int32_t weights_offset; - int32_t output_offset; - int32_t output_multiplier; - int output_shift; - // uint8_t, etc, activation params. - int32_t quantized_activation_min; - int32_t quantized_activation_max; - // float activation params. - float float_activation_min; - float float_activation_max; - // Mark the operands as cacheable if they are unchanging, e.g. weights. - bool lhs_cacheable; - bool rhs_cacheable; - FullyConnectedWeightsFormat weights_format; -}; - -struct GatherParams { - int16_t axis; -}; - -struct L2NormalizationParams { - // uint8_t inference params. - int32_t input_zero_point; -}; - -struct LocalResponseNormalizationParams { - int32_t range; - double bias; - double alpha; - double beta; -}; - -struct HardSwishParams { - // zero_point of the input activations. - int16_t input_zero_point; - // zero_point of the output activations. - int16_t output_zero_point; - // 16bit fixed-point component of the multiplier to apply to go from the - // "high-res input scale", which is the input scale multiplied by 2^7, to the - // "relu-ish scale", which 3.0/32768. - // See the implementation of HardSwishPrepare. - int16_t reluish_multiplier_fixedpoint_int16; - // exponent/bit-shift component of the aforementioned multiplier. - int reluish_multiplier_exponent; - // 16bit fixed-point component of the multiplier to apply to go from the - // "high-res input scale", which is the input scale multiplied by 2^7, to the - // output scale. - // See the implementation of HardSwishPrepare. - int16_t output_multiplier_fixedpoint_int16; - // exponent/bit-shift component of the aforementioned multiplier. - int output_multiplier_exponent; -}; - -struct LogisticParams { - // uint8_t inference params. - int32_t input_zero_point; - int32_t input_range_radius; - int32_t input_multiplier; - int input_left_shift; -}; - -struct LstmCellParams { - int32_t weights_zero_point; - int32_t accum_multiplier; - int accum_shift; - int state_integer_bits; -}; - -struct MeanParams { - int8_t axis_count; - int16_t axis[4]; -}; - -struct PackParams { - int8_t axis; - const int32_t* input_zeropoint; - const float* input_scale; - uint16_t inputs_count; - int32_t output_zeropoint; - float output_scale; -}; - -struct PadParams { - int8_t left_padding_count; - int32_t left_padding[4]; - int8_t right_padding_count; - int32_t right_padding[4]; - ResizingCategory resizing_category; -}; - -struct PreluParams { - int32_t input_offset; - int32_t alpha_offset; - int32_t output_offset; - int32_t output_multiplier_1; - int output_shift_1; - int32_t output_multiplier_2; - int output_shift_2; -}; - -struct PoolParams { - FusedActivationFunctionType activation; - PaddingType padding_type; - PaddingValues padding_values; - int stride_height; - int stride_width; - int filter_height; - int filter_width; - // uint8_t, etc, activation params. - int32_t quantized_activation_min; - int32_t quantized_activation_max; - // float activation params. - float float_activation_min; - float float_activation_max; -}; - -struct ReshapeParams { - int8_t shape_count; - int32_t shape[4]; -}; - -struct ResizeBilinearParams { - bool align_corners; - // half_pixel_centers assumes pixels are of half the actual dimensions, and - // yields more accurate resizes. Corresponds to the same argument for the - // original TensorFlow op in TF2.0. 
- bool half_pixel_centers; -}; - -struct ResizeNearestNeighborParams { - bool align_corners; - bool half_pixel_centers; -}; - -struct SliceParams { - int8_t begin_count; - int32_t begin[5]; - int8_t size_count; - int32_t size[5]; -}; - -struct SoftmaxParams { - // beta is not really used (not a Tensorflow parameter) and not implemented - // for LogSoftmax. - double beta; - // uint8_t inference params. Used even when beta defaults to 1.0. - int32_t input_multiplier; - int32_t input_left_shift; - // Reverse scaling is only used by LogSoftmax. - int32_t reverse_scaling_divisor; - int32_t reverse_scaling_right_shift; - int diff_min; - int32_t zero_point; - float scale; - float* table; - // int16 LUT for exp(x), where x uniform distributed between [-10.0 , 0.0] - int16_t* exp_lut; - // int16 LUT for 1 / (1 + x), where x uniform distributed between [0.0 , 1.0] - int16_t* one_over_one_plus_x_lut; - uint8_t* uint8_table1; - uint8_t* uint8_table2; -}; - -struct SpaceToBatchParams { - // "Zero" padding for uint8_t means padding with the output offset. - int32_t output_offset; -}; - -struct SpaceToDepthParams { - int32_t block_size; -}; - -struct SplitParams { - // Graphs that split into, say, 2000 nodes are encountered. The indices in - // OperatorEdges are of type uint16_t. - uint16_t num_split; - int16_t axis; -}; - -struct SqueezeParams { - int8_t squeeze_dims_count; - int32_t squeeze_dims[4]; -}; - -struct StridedSliceParams { - int8_t start_indices_count; - int32_t start_indices[5]; - int8_t stop_indices_count; - int32_t stop_indices[5]; - int8_t strides_count; - int32_t strides[5]; - - int16_t begin_mask; - int16_t ellipsis_mask; - int16_t end_mask; - int16_t new_axis_mask; - int16_t shrink_axis_mask; -}; - -struct TanhParams { - int32_t input_zero_point; - int32_t input_range_radius; - int32_t input_multiplier; - int input_left_shift; -}; - -struct TransposeParams { - int8_t perm_count; - int32_t perm[5]; -}; - -struct UnpackParams { - uint16_t num_split; - int16_t axis; -}; - -struct LeakyReluParams { - float alpha; - int32_t input_offset; - int32_t output_offset; - int32_t output_multiplier_alpha; - int32_t output_shift_alpha; - int32_t output_multiplier_identity; - int32_t output_shift_identity; -}; - -template -inline void SetActivationParams(float min, float max, P* params) { - params->float_activation_min = min; - params->float_activation_max = max; -} - -template -inline void SetActivationParams(int32_t min, int32_t max, P* params) { - params->quantized_activation_min = min; - params->quantized_activation_max = max; -} - -template -inline void SetActivationParams(int64_t min, int64_t max, P* params) { - params->int64_activation_min = min; - params->int64_activation_max = max; -} - -template -inline void GetActivationParams(const P& params, int32_t* min, int32_t* max) { - *min = params.quantized_activation_min; - *max = params.quantized_activation_max; -} - -template -inline void GetActivationParams(const P& params, float* min, float* max) { - *min = params.float_activation_min; - *max = params.float_activation_max; -} - -template -inline void GetActivationParams(const P& params, int64_t* min, int64_t* max) { - *min = params.int64_activation_min; - *max = params.int64_activation_max; -} -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TYPES_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/kernel_util.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/kernel_util.cc deleted file mode 
100644 index 88d91a85..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/kernel_util.cc +++ /dev/null @@ -1,480 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/kernels/kernel_util.h" - -#include -#include - -#include -#include -#include -#include - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/cppmath.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" - -namespace tflite { - -namespace { - -// Assumes tensor_index is a valid index (in bounds) -inline TfLiteTensor* GetTensorAtIndex(const TfLiteContext* context, - int tensor_index) { - if (context->tensors != nullptr) { - return &context->tensors[tensor_index]; - } else { - return context->GetTensor(context, tensor_index); - } -} - -// Validate in a single place to reduce binary size -inline TfLiteStatus ValidateTensorIndexingSafe(const TfLiteContext* context, - int index, int max_size, - const int* tensor_indices, - int* tensor_index) { - if (index < 0 || index >= max_size) { - TF_LITE_KERNEL_LOG(const_cast(context), - "Invalid tensor index %d (not in [0, %d))\n", index, - max_size); - return kTfLiteError; - } - if (tensor_indices[index] == kTfLiteOptionalTensor) { - TF_LITE_KERNEL_LOG(const_cast(context), - "Tensor at index %d was optional but was expected\n", - index); - return kTfLiteError; - } - - *tensor_index = tensor_indices[index]; - return kTfLiteOk; -} - -// Same as above but returns -1 for invalid inputs instead of status + logging -// error. -inline int ValidateTensorIndexing(const TfLiteContext* context, int index, - int max_size, const int* tensor_indices) { - if (index >= 0 && index < max_size) { - const int tensor_index = tensor_indices[index]; - if (tensor_index != kTfLiteOptionalTensor) { - return tensor_index; - } - } - return -1; -} - -inline TfLiteTensor* GetMutableInput(const TfLiteContext* context, - const TfLiteNode* node, int index) { - const int tensor_index = ValidateTensorIndexing( - context, index, node->inputs->size, node->inputs->data); - if (tensor_index < 0) { - return nullptr; - } - return GetTensorAtIndex(context, tensor_index); -} - -inline TfLiteStatus GetMutableInputSafe(const TfLiteContext* context, - const TfLiteNode* node, int index, - const TfLiteTensor** tensor) { - int tensor_index; - TF_LITE_ENSURE_OK( - context, ValidateTensorIndexingSafe(context, index, node->inputs->size, - node->inputs->data, &tensor_index)); - *tensor = GetTensorAtIndex(context, tensor_index); - return kTfLiteOk; -} - -} // anonymous namespace. 
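The helpers in the anonymous namespace above centralize tensor-index validation; the public GetInputSafe()/GetOutputSafe() wrappers defined just below build on them. A sketch of the typical call pattern inside a kernel's Prepare function, assuming the usual TfLite registration signature:

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"

// Fetch the first input and first output, failing cleanly on a bad index.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = nullptr;
  TF_LITE_ENSURE_OK(context, tflite::GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output = nullptr;
  TF_LITE_ENSURE_OK(context, tflite::GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_EQ(context, input->type, output->type);
  return kTfLiteOk;
}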
- -const TfLiteTensor* GetInput(const TfLiteContext* context, - const TfLiteNode* node, int index) { - return GetMutableInput(context, node, index); -} - -TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node, - int index, const TfLiteTensor** tensor) { - return GetMutableInputSafe(context, node, index, tensor); -} - -TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node, - int index) { - TfLiteTensor* tensor = GetMutableInput(context, node, index); - return tensor->is_variable ? tensor : nullptr; -} - -TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node, - int index) { - const int tensor_index = ValidateTensorIndexing( - context, index, node->outputs->size, node->outputs->data); - if (tensor_index < 0) { - return nullptr; - } - return GetTensorAtIndex(context, tensor_index); -} - -TfLiteStatus GetOutputSafe(const TfLiteContext* context, const TfLiteNode* node, - int index, TfLiteTensor** tensor) { - int tensor_index; - TF_LITE_ENSURE_OK( - context, ValidateTensorIndexingSafe(context, index, node->outputs->size, - node->outputs->data, &tensor_index)); - *tensor = GetTensorAtIndex(context, tensor_index); - return kTfLiteOk; -} - -const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context, - const TfLiteNode* node, int index) { - return GetInput(context, node, index); -} - -#ifndef TF_LITE_STATIC_MEMORY -TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node, - int index) { - const int tensor_index = ValidateTensorIndexing( - context, index, node->temporaries->size, node->temporaries->data); - if (tensor_index < 0) { - return nullptr; - } - return GetTensorAtIndex(context, tensor_index); -} - -TfLiteStatus GetTemporarySafe(const TfLiteContext* context, - const TfLiteNode* node, int index, - TfLiteTensor** tensor) { - int tensor_index; - TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe( - context, index, node->temporaries->size, - node->temporaries->data, &tensor_index)); - *tensor = GetTensorAtIndex(context, tensor_index); - return kTfLiteOk; -} - -const TfLiteTensor* GetIntermediates(TfLiteContext* context, - const TfLiteNode* node, int index) { - const int tensor_index = ValidateTensorIndexing( - context, index, node->intermediates->size, node->intermediates->data); - if (tensor_index < 0) { - return nullptr; - } - return GetTensorAtIndex(context, tensor_index); -} - -TfLiteStatus GetIntermediatesSafe(const TfLiteContext* context, - const TfLiteNode* node, int index, - TfLiteTensor** tensor) { - int tensor_index; - TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe( - context, index, node->intermediates->size, - node->intermediates->data, &tensor_index)); - *tensor = GetTensorAtIndex(context, tensor_index); - return kTfLiteOk; -} -#endif // TF_LITE_STATIC_MEMORY - -// Per-axis -TfLiteStatus PopulateConvolutionQuantizationParams( - TfLiteContext* context, const TfLiteTensor* input, - const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, - const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, - int32_t* output_activation_min, int32_t* output_activation_max, - int32_t* per_channel_multiplier, int* per_channel_shift) { - const auto* affine_quantization = - reinterpret_cast(filter->quantization.params); - return PopulateConvolutionQuantizationParams( - context, input, filter, bias, output, activation, multiplier, shift, - output_activation_min, output_activation_max, per_channel_multiplier, - per_channel_shift, affine_quantization->scale->size); 
-} - -// Per-axis & per-tensor -TfLiteStatus PopulateConvolutionQuantizationParams( - TfLiteContext* context, const TfLiteTensor* input, - const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, - const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, - int32_t* output_activation_min, int32_t* output_activation_max, - int32_t* per_channel_multiplier, int* per_channel_shift, int num_channels) { - TF_LITE_ENSURE_EQ(context, input->quantization.type, - kTfLiteAffineQuantization); - TF_LITE_ENSURE_EQ(context, filter->quantization.type, - kTfLiteAffineQuantization); - // TODO(jianlijianli): Enable bias type check and bias scale == input scale - // * filter scale for each channel in affine quantization once bias - // quantization is properly populated. - // TF_LITE_ENSURE_EQ(context, bias->quantization.type, - // kTfLiteAffineQuantization); - - // Check data type. - const auto* affine_quantization = - reinterpret_cast(filter->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - const bool is_per_channel = affine_quantization->scale->size > 1; - if (is_per_channel) { - // Currently only Int8/Int16 is supported for per channel quantization. - TF_LITE_ENSURE(context, - input->type == kTfLiteInt8 || input->type == kTfLiteInt16); - TF_LITE_ENSURE_EQ(context, filter->type, kTfLiteInt8); - TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, num_channels); - TF_LITE_ENSURE_EQ( - context, num_channels, - filter->dims->data[affine_quantization->quantized_dimension]); - } - - // Populate multiplier and shift using affine quantization. - const float input_scale = input->params.scale; - const float output_scale = output->params.scale; - const float* filter_scales = affine_quantization->scale->data; - for (int i = 0; i < num_channels; ++i) { - // If per-tensor quantization parameter is specified, broadcast it along the - // quantization dimension (channels_out). - const float scale = is_per_channel ? filter_scales[i] : filter_scales[0]; - const double filter_scale = static_cast(scale); - const double effective_output_scale = static_cast(input_scale) * - filter_scale / - static_cast(output_scale); - int32_t significand; - int channel_shift; - QuantizeMultiplier(effective_output_scale, &significand, &channel_shift); - per_channel_multiplier[i] = significand; - per_channel_shift[i] = channel_shift; - } - - // Populate scalar quantization parameters. - // This check on legacy quantization parameters is kept only for backward - // compatibility. - if (input->type == kTfLiteUInt8) { - // Check bias scale == input scale * filter scale. - double real_multiplier = 0.0; - TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( - context, input, filter, bias, output, &real_multiplier)); - int exponent; - - // Populate quantization parameters with multiplier and shift. 
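Each per-channel multiplier above is derived from effective_output_scale = input_scale * filter_scale / output_scale and then split into a fixed-point significand plus power-of-two shift by QuantizeMultiplier(). A rough sketch of that decomposition for a positive multiplier (the real helper in quantization_util also handles zero and boundary rounding):

#include <cmath>
#include <cstdint>

// Decompose a positive real multiplier into a Q31 significand and an exponent,
// so that real_multiplier ~= quantized_multiplier * 2^(shift - 31).
inline void DecomposeMultiplier(double real_multiplier,
                                int32_t* quantized_multiplier, int* shift) {
  int exponent = 0;
  const double significand = std::frexp(real_multiplier, &exponent);  // [0.5, 1)
  int64_t q = static_cast<int64_t>(std::round(significand * (1ll << 31)));
  if (q == (1ll << 31)) {  // rounding pushed the significand to 1.0
    q /= 2;
    ++exponent;
  }
  *quantized_multiplier = static_cast<int32_t>(q);
  *shift = exponent;
}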
- QuantizeMultiplier(real_multiplier, multiplier, &exponent); - *shift = -exponent; - } - if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 || - input->type == kTfLiteInt16) { - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, activation, output, output_activation_min, - output_activation_max)); - } - return kTfLiteOk; -} - -TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context, - const TfLiteTensor* input, - const TfLiteTensor* filter, - const TfLiteTensor* bias, - TfLiteTensor* output, - double* multiplier) { - const double input_product_scale = static_cast(input->params.scale) * - static_cast(filter->params.scale); - // The following conditions must be guaranteed by the training pipeline. - if (bias) { - const double bias_scale = static_cast(bias->params.scale); - // Here we're making sure the input_product_scale & bias_scale are about the - // same. Since we have: - // (output - output_zp) * output_scale = - // input_product_scale * input_product + bias * bias_scale ---- (0) - // - // (0) equals: - // (input_product + bias) * input_product_scale ----- (1) - // + - // bias * (bias_scale - input_product_scale) ------ (2) - // - // For the real kernel computation, we're doing (1), so we really need to - // make sure (2) has minimum impact on the output, so: - // bias * (bias_scale - input_product_scale) / output_scale should be - // a small number for an integer. - // Since normally bias should be within a small range. - // We should expect (bias_scale - input_product_scale) / output_scale to - // be a small number like 0.02. - const double scale_diff = std::abs(input_product_scale - bias_scale); - const double output_scale = static_cast(output->params.scale); - - TF_LITE_ENSURE(context, scale_diff / output_scale <= 0.02); - } - return GetQuantizedConvolutionMultipler(context, input, filter, output, - multiplier); -} - -TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context, - const TfLiteTensor* input, - const TfLiteTensor* filter, - TfLiteTensor* output, - double* multiplier) { - const double input_product_scale = - static_cast(input->params.scale * filter->params.scale); - TF_LITE_ENSURE(context, input_product_scale >= 0); - *multiplier = input_product_scale / static_cast(output->params.scale); - - return kTfLiteOk; -} - -namespace { -void CalculateActivationRangeQuantizedImpl(TfLiteFusedActivation activation, - int32_t qmin, int32_t qmax, - TfLiteTensor* output, - int32_t* act_min, int32_t* act_max) { - const auto scale = output->params.scale; - const auto zero_point = output->params.zero_point; - - auto quantize = [scale, zero_point](float f) { - return zero_point + static_cast(TfLiteRound(f / scale)); - }; - - if (activation == kTfLiteActRelu) { - *act_min = std::max(qmin, quantize(0.0)); - *act_max = qmax; - } else if (activation == kTfLiteActRelu6) { - *act_min = std::max(qmin, quantize(0.0)); - *act_max = std::min(qmax, quantize(6.0)); - } else if (activation == kTfLiteActReluN1To1) { - *act_min = std::max(qmin, quantize(-1.0)); - *act_max = std::min(qmax, quantize(1.0)); - } else { - *act_min = qmin; - *act_max = qmax; - } -} -} // namespace - -TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context, - TfLiteFusedActivation activation, - TfLiteTensor* output, - int32_t* act_min, - int32_t* act_max) { - int32_t qmin = 0; - int32_t qmax = 0; - if (output->type == kTfLiteUInt8) { - qmin = std::numeric_limits::min(); - qmax = std::numeric_limits::max(); - } else if (output->type == kTfLiteInt8) { - qmin = 
std::numeric_limits::min(); - qmax = std::numeric_limits::max(); - } else if (output->type == kTfLiteInt16) { - qmin = std::numeric_limits::min(); - qmax = std::numeric_limits::max(); - } else { - TF_LITE_ENSURE(context, false); - } - - CalculateActivationRangeQuantizedImpl(activation, qmin, qmax, output, act_min, - act_max); - return kTfLiteOk; -} - -bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) { - return TfLiteIntArrayEqual(input1->dims, input2->dims); -} - -// TODO(petewarden): Having macros around this is ugly, look at other strategies -// before replicating this approach elsewhere. -#ifndef TF_LITE_STATIC_MEMORY -TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, - const TfLiteTensor* input1, - const TfLiteTensor* input2, - TfLiteIntArray** output_shape) { - int dims1 = NumDimensions(input1); - int dims2 = NumDimensions(input2); - int out_dims = std::max(dims1, dims2); - if (NumElements(input1) == 0) { - *output_shape = TfLiteIntArrayCopy(input1->dims); - return kTfLiteOk; - } - std::unique_ptr shape( - TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree); - for (int i = 0; i < out_dims; ++i) { - int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); - int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); - TF_LITE_ENSURE(context, d1 == d2 || d1 == 1 || d2 == 1); - shape->data[out_dims - i - 1] = std::max(d1, d2); - } - *output_shape = shape.release(); - return kTfLiteOk; -} - -TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, - const TfLiteTensor* input1, - const TfLiteTensor* input2, - const TfLiteTensor* input3, - TfLiteIntArray** output_shape) { - int dims1 = NumDimensions(input1); - int dims2 = NumDimensions(input2); - int dims3 = NumDimensions(input3); - int out_dims = std::max(std::max(dims1, dims2), dims3); - std::unique_ptr shape( - TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree); - for (int i = 0; i < out_dims; ++i) { - int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); - int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); - int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1); - int max_value = std::max(std::max(d1, d2), d3); - TF_LITE_ENSURE(context, d1 == 1 || d1 == max_value); - TF_LITE_ENSURE(context, d2 == 1 || d2 == max_value); - TF_LITE_ENSURE(context, d3 == 1 || d3 == max_value); - shape->data[out_dims - i - 1] = max_value; - } - *output_shape = shape.release(); - return kTfLiteOk; -} -#endif // TF_LITE_STATIC_MEMORY - -// Size of string is not constant, return 0 in such case. 
-int TfLiteTypeGetSize(TfLiteType type) { - switch (type) { - case kTfLiteUInt8: - TF_LITE_ASSERT_EQ(sizeof(uint8_t), 1); - return 1; - case kTfLiteInt8: - TF_LITE_ASSERT_EQ(sizeof(int8_t), 1); - return 1; - case kTfLiteBool: - return sizeof(bool); - case kTfLiteInt16: - TF_LITE_ASSERT_EQ(sizeof(int16_t), 2); - return 2; - case kTfLiteFloat16: - TF_LITE_ASSERT_EQ(sizeof(int16_t), 2); - return 2; - case kTfLiteFloat32: - TF_LITE_ASSERT_EQ(sizeof(float), 4); - return 4; - case kTfLiteInt32: - TF_LITE_ASSERT_EQ(sizeof(int32_t), 4); - return 4; - case kTfLiteInt64: - TF_LITE_ASSERT_EQ(sizeof(int64_t), 8); - return 8; - case kTfLiteUInt64: - TF_LITE_ASSERT_EQ(sizeof(uint64_t), 8); - return 8; - case kTfLiteFloat64: - TF_LITE_ASSERT_EQ(sizeof(double), 8); - return 8; - case kTfLiteComplex64: - TF_LITE_ASSERT_EQ(sizeof(std::complex), 8); - return 8; - case kTfLiteComplex128: - TF_LITE_ASSERT_EQ(sizeof(std::complex), 16); - return 16; - default: - return 0; - } -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/kernel_util.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/kernel_util.h deleted file mode 100644 index 7a1aa165..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/kernel_util.h +++ /dev/null @@ -1,293 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_ -#define TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_ - -#include - -#include - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" - -namespace tflite { - -// A fair number of functions in this header have historically been inline. -// It is ok to change functions to not be inline if the latency with -// benchmark_model for MobileNet + MobileBERT is unaffected. If such a change is -// made, move the newly non-inlined function declarations to the top of this -// header file. - -// Note: You must check if result is not null: -// -// TfLiteTensor* my_tensor = GetInput(context, node, kMyTensorIdx); -// TF_LITE_ENSURE(context, my_tensor != nullptr); -// -// This is because the index might point to the optional tensor constant -// (kTfLiteOptionalTensor) in which case there is no tensor to return. -const TfLiteTensor* GetInput(const TfLiteContext* context, - const TfLiteNode* node, int index); - -// Same as `GetInput` but returns boolean and uses output argument for tensor. -// -// TfLiteTensor* my_tensor; -// TF_LITE_ENSURE_OK(context, -// GetInputSafe(context, node, kMyTensorIdx, &my_tensor)); -// // can use my_tensor directly from here onwards, it is not nullptr -// -// Should be used in cases where the binary size is too large. 
-TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node, - int index, const TfLiteTensor** tensor); - -// Note: You must check if result is not null: -// -// TfLiteTensor* my_tensor = GetVariableInput(context, node, kMyTensorIdx); -// TF_LITE_ENSURE(context, my_tensor != nullptr); -// -// This is because the index might point to the optional tensor constant -// (kTfLiteOptionalTensor) in which case there is no tensor to return. -TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node, - int index); - -// Note: You must check if result is not null: -// -// TfLiteTensor* my_tensor = GetOutput(context, node, kMyTensorIdx); -// TF_LITE_ENSURE(context, my_tensor != nullptr); -// -// This is because the index might point to the optional tensor constant -// (kTfLiteOptionalTensor) in which case there is no tensor to return. -TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node, - int index); - -// Same as `GetOutput` but returns boolean and uses output argument for tensor. -// -// TfLiteTensor* my_tensor; -// TF_LITE_ENSURE_OK(context, -// GetOutputSafe(context, node, kMyTensorIdx, &my_tensor)); -// // can use my_tensor directly from here onwards, it is not nullptr -// -// Should be used in cases where the binary size is too large. -TfLiteStatus GetOutputSafe(const TfLiteContext* context, const TfLiteNode* node, - int index, TfLiteTensor** tensor); - -// Note: You must check if result is not null: -// -// TfLiteTensor* my_tensor = GetOptionalInputTensor(context, node, kIdx); -// TF_LITE_ENSURE(context, my_tensor != nullptr); -// -// This is because the index might point to the optional tensor constant -// (kTfLiteOptionalTensor) in which case there is no tensor to return. -// -// Deprecated. GetInput has the same functionality. -const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context, - const TfLiteNode* node, int index); - -#ifndef TF_LITE_STATIC_MEMORY -// Note: You must check if result is not null: -// -// TfLiteTensor* my_tensor = GetTemporary(context, node, kMyTensorIdx); -// TF_LITE_ENSURE(context, my_tensor != nullptr); -// -// This is because the index might point to the optional tensor constant -// (kTfLiteOptionalTensor) in which case there is no tensor to return. -TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node, - int index); - -// Same as `GetTemporary` but returns boolean and uses output argument for -// tensor. -// -// TfLiteTensor* my_tensor; -// TF_LITE_ENSURE_OK(context, -// GetTemporarySafe(context, node, kMyTensorIdx, -// &my_tensor)); -// // can use my_tensor directly from here onwards, it is not nullptr -// -// Should be used in cases where the binary size is too large. -TfLiteStatus GetTemporarySafe(const TfLiteContext* context, - const TfLiteNode* node, int index, - TfLiteTensor** tensor); - -// Note: You must check if result is not null: -// -// TfLiteTensor* my_tensor = GetIntermediates(context, node, kMyTensorIdx); -// TF_LITE_ENSURE(context, my_tensor != nullptr); -// -// This is because the index might point to the optional tensor constant -// (kTfLiteOptionalTensor) in which case there is no tensor to return. -const TfLiteTensor* GetIntermediates(TfLiteContext* context, - const TfLiteNode* node, int index); - -// Same as `GetIntermediates` but returns boolean and uses output argument for -// tensor. 
-// -// TfLiteTensor* my_tensor; -// TF_LITE_ENSURE_OK(context, -// GetIntermediatesSafe(context, node, kMyTensorIdx, -// &my_tensor)); -// // can use my_tensor directly from here onwards, it is not nullptr -// -// Should be used in cases where the binary size is too large. -TfLiteStatus GetIntermediatesSafe(const TfLiteContext* context, - const TfLiteNode* node, int index, - TfLiteTensor** tensor); -#endif // TF_LITE_STATIC_MEMORY - -inline int NumDimensions(const TfLiteTensor* t) { return t->dims->size; } -inline int SizeOfDimension(const TfLiteTensor* t, int dim) { - return t->dims->data[dim]; -} - -inline int NumInputs(const TfLiteNode* node) { return node->inputs->size; } -inline int NumOutputs(const TfLiteNode* node) { return node->outputs->size; } - -#ifndef TF_LITE_STATIC_MEMORY -inline int NumIntermediates(const TfLiteNode* node) { - return node->intermediates->size; -} -#endif // TF_LITE_STATIC_MEMORY - -inline int64_t NumElements(const TfLiteIntArray* dims) { - int64_t count = 1; - for (int i = 0; i < dims->size; ++i) { - count *= dims->data[i]; - } - return count; -} - -inline int64_t NumElements(const TfLiteTensor* t) { - return NumElements(t->dims); -} - -// Determines whether tensor is constant. -// TODO(b/138199592): Introduce new query which checks for constant OR -// persistent-read-only, which would be useful for most tensor kernels that -// are potentially dynamic based on the input tensor value availability at the -// time of prepare. -inline bool IsConstantTensor(const TfLiteTensor* tensor) { - return tensor->allocation_type == kTfLiteMmapRo; -} - -// Determines whether tensor is dynamic. Note that a tensor can be non-const and -// not dynamic. This function specifically checks for a dynamic tensor. -inline bool IsDynamicTensor(const TfLiteTensor* tensor) { - return tensor->allocation_type == kTfLiteDynamic; -} - -// Sets tensor to dynamic. -inline void SetTensorToDynamic(TfLiteTensor* tensor) { - if (tensor->allocation_type != kTfLiteDynamic) { - tensor->allocation_type = kTfLiteDynamic; - tensor->data.raw = nullptr; - } -} - -// Sets tensor to persistent and read-only. -inline void SetTensorToPersistentRo(TfLiteTensor* tensor) { - if (tensor->allocation_type != kTfLitePersistentRo) { - tensor->allocation_type = kTfLitePersistentRo; - tensor->data.raw = nullptr; - } -} - -// Determines whether it is a hybrid op - one that has float inputs and -// quantized weights. -inline bool IsHybridOp(const TfLiteTensor* input, const TfLiteTensor* weight) { - return ((weight->type == kTfLiteUInt8 || weight->type == kTfLiteInt8) && - input->type == kTfLiteFloat32); -} - -// Check dimensionality match and populate OpData for Conv and DepthwiseConv. 
-TfLiteStatus PopulateConvolutionQuantizationParams(
- TfLiteContext* context, const TfLiteTensor* input,
- const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
- const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
- int32_t* output_activation_min, int32_t* output_activation_max,
- int32_t* per_channel_multiplier, int* per_channel_shift);
-
-TfLiteStatus PopulateConvolutionQuantizationParams(
- TfLiteContext* context, const TfLiteTensor* input,
- const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
- const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
- int32_t* output_activation_min, int32_t* output_activation_max,
- int32_t* per_channel_multiplier, int* per_channel_shift, int num_channels);
-
-// Calculates the multiplication factor for a quantized convolution (or
-// quantized depthwise convolution) involving the given tensors. Returns an
-// error if the scales of the tensors are not compatible.
-TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
- const TfLiteTensor* input,
- const TfLiteTensor* filter,
- const TfLiteTensor* bias,
- TfLiteTensor* output,
- double* multiplier);
-
-TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
- const TfLiteTensor* input,
- const TfLiteTensor* filter,
- TfLiteTensor* output,
- double* multiplier);
-
-// Calculates the useful quantized range of an activation layer given its
-// activation tensor.
-TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
- TfLiteFusedActivation activation,
- TfLiteTensor* output,
- int32_t* act_min,
- int32_t* act_max);
-
-// Calculates the useful range of an activation layer given its activation
-// tensor.a
-template <typename T>
-void CalculateActivationRange(TfLiteFusedActivation activation,
- T* activation_min, T* activation_max) {
- if (activation == kTfLiteActRelu) {
- *activation_min = 0;
- *activation_max = std::numeric_limits<T>::max();
- } else if (activation == kTfLiteActRelu6) {
- *activation_min = 0;
- *activation_max = 6;
- } else if (activation == kTfLiteActReluN1To1) {
- *activation_min = -1;
- *activation_max = 1;
- } else {
- *activation_min = std::numeric_limits<T>::lowest();
- *activation_max = std::numeric_limits<T>::max();
- }
-}
-
-// Return true if the given tensors have the same shape.
-bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2);
-
-// Calculates the output_shape that is necessary for element-wise operations
-// with broadcasting involving the two input tensors.
-TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
- const TfLiteTensor* input1,
- const TfLiteTensor* input2,
- TfLiteIntArray** output_shape);
-
-// Calculates the output_shape that is necessary for element-wise operations
-// with broadcasting involving the three input tensors.
-TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
- const TfLiteTensor* input1,
- const TfLiteTensor* input2,
- const TfLiteTensor* input3,
- TfLiteIntArray** output_shape);
-
-// Return the size of given type in bytes. Return 0 in in case of string.
-int TfLiteTypeGetSize(TfLiteType type);
-
-} // namespace tflite
-
-#endif // TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_
diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/op_macros.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/op_macros.h
deleted file mode 100644
index 293dc76e..00000000
--- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/op_macros.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#ifndef TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
-#define TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
-
-// If we're on a platform without standard IO functions, fall back to a
-// non-portable function.
-#ifdef TF_LITE_MCU_DEBUG_LOG
-
-#include "tensorflow/lite/micro/debug_log.h"
-
-#define DEBUG_LOG(x) \
- do { \
- DebugLog(x); \
- } while (0)
-
-inline void InfiniteLoop() {
- DEBUG_LOG("HALTED\n");
- while (1) {
- }
-}
-
-#define TFLITE_ABORT InfiniteLoop();
-
-#else // TF_LITE_MCU_DEBUG_LOG
-
-#include <cstdio>
-#include <cstdlib>
-
-#define DEBUG_LOG(x) \
- do { \
- fprintf(stderr, "%s", (x)); \
- } while (0)
-
-// Report Error for unsupported type by op 'op_name' and returns kTfLiteError.
-#define TF_LITE_UNSUPPORTED_TYPE(context, type, op_name) \
- do { \
- TF_LITE_KERNEL_LOG((context), "%s:%d Type %s is unsupported by op %s.", \
- __FILE__, __LINE__, TfLiteTypeGetName(type), \
- (op_name)); \
- return kTfLiteError; \
- } while (0)
-
-#define TFLITE_ABORT abort()
-
-#endif // TF_LITE_MCU_DEBUG_LOG
-
-#if defined(NDEBUG) || defined(ARDUINO)
-#define TFLITE_ASSERT_FALSE (static_cast<void>(0))
-#else
-#define TFLITE_ASSERT_FALSE TFLITE_ABORT
-#endif
-
-#define TF_LITE_FATAL(msg) \
- do { \
- DEBUG_LOG(msg); \
- DEBUG_LOG("\nFATAL\n"); \
- TFLITE_ABORT; \
- } while (0)
-
-#define TF_LITE_ASSERT(x) \
- do { \
- if (!(x)) TF_LITE_FATAL(#x); \
- } while (0)
-
-#define TF_LITE_ASSERT_EQ(x, y) \
- do { \
- if ((x) != (y)) TF_LITE_FATAL(#x " didn't equal " #y); \
- } while (0)
-
-#endif // TENSORFLOW_LITE_KERNELS_OP_MACROS_H_
diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/padding.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/padding.h
deleted file mode 100644
index 1116b1da..00000000
--- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/kernels/padding.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_PADDING_H_ -#define TENSORFLOW_LITE_KERNELS_PADDING_H_ - -#include "tensorflow/lite/c/builtin_op_data.h" - -namespace tflite { - -// TODO(renjieliu): Migrate others to use ComputePaddingWithLeftover. -inline int ComputePadding(int stride, int dilation_rate, int in_size, - int filter_size, int out_size) { - int effective_filter_size = (filter_size - 1) * dilation_rate + 1; - int padding = ((out_size - 1) * stride + effective_filter_size - in_size) / 2; - return padding > 0 ? padding : 0; -} - -// It's not guaranteed that padding is symmetric. It's important to keep -// offset for algorithms need all paddings. -inline int ComputePaddingWithOffset(int stride, int dilation_rate, int in_size, - int filter_size, int out_size, - int* offset) { - int effective_filter_size = (filter_size - 1) * dilation_rate + 1; - int total_padding = - ((out_size - 1) * stride + effective_filter_size - in_size); - total_padding = total_padding > 0 ? total_padding : 0; - *offset = total_padding % 2; - return total_padding / 2; -} - -// Matching GetWindowedOutputSize in TensorFlow. -inline int ComputeOutSize(TfLitePadding padding, int image_size, - int filter_size, int stride, int dilation_rate = 1) { - int effective_filter_size = (filter_size - 1) * dilation_rate + 1; - switch (padding) { - case kTfLitePaddingSame: - return (image_size + stride - 1) / stride; - case kTfLitePaddingValid: - return (image_size + stride - effective_filter_size) / stride; - default: - return 0; - } -} - -inline TfLitePaddingValues ComputePaddingHeightWidth( - int stride_height, int stride_width, int dilation_rate_height, - int dilation_rate_width, int in_height, int in_width, int filter_height, - int filter_width, TfLitePadding padding, int* out_height, int* out_width) { - *out_width = ComputeOutSize(padding, in_width, filter_width, stride_width, - dilation_rate_width); - *out_height = ComputeOutSize(padding, in_height, filter_height, stride_height, - dilation_rate_height); - - TfLitePaddingValues padding_values; - int offset = 0; - padding_values.height = - ComputePaddingWithOffset(stride_height, dilation_rate_height, in_height, - filter_height, *out_height, &offset); - padding_values.height_offset = offset; - padding_values.width = - ComputePaddingWithOffset(stride_width, dilation_rate_width, in_width, - filter_width, *out_width, &offset); - padding_values.width_offset = offset; - return padding_values; -} -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_PADDING_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/all_ops_resolver.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/all_ops_resolver.cc deleted file mode 100644 index 8b87de8e..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/all_ops_resolver.cc +++ /dev/null @@ -1,84 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/all_ops_resolver.h" - -#include "tensorflow/lite/micro/kernels/micro_ops.h" - -namespace tflite { - -AllOpsResolver::AllOpsResolver() { - // Please keep this list of Builtin Operators in alphabetical order. - AddAbs(); - AddAdd(); - AddArgMax(); - AddArgMin(); - AddAveragePool2D(); - AddCeil(); - AddConcatenation(); - AddConv2D(); - AddCos(); - AddDepthwiseConv2D(); - AddDequantize(); - AddDetectionPostprocess(); - AddEqual(); - AddEthosU(); - AddFloor(); - AddFullyConnected(); - AddGreater(); - AddGreaterEqual(); - AddHardSwish(); - AddL2Normalization(); - AddLess(); - AddLessEqual(); - AddLog(); - AddLogicalAnd(); - AddLogicalNot(); - AddLogicalOr(); - AddLogistic(); - AddMaximum(); - AddMaxPool2D(); - AddMean(); - AddMinimum(); - AddMul(); - AddNeg(); - AddNotEqual(); - AddPack(); - AddPad(); - AddPadV2(); - AddPrelu(); - AddQuantize(); - AddReduceMax(); - AddRelu(); - AddRelu6(); - AddReshape(); - AddResizeNearestNeighbor(); - AddRound(); - AddRsqrt(); - AddShape(); - AddSin(); - AddSoftmax(); - AddSplit(); - AddSplitV(); - AddSqrt(); - AddSquare(); - AddStridedSlice(); - AddSub(); - AddSvdf(); - AddTanh(); - AddUnpack(); -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/all_ops_resolver.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/all_ops_resolver.h deleted file mode 100644 index 391b4f08..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/all_ops_resolver.h +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ -#define TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ - -#include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/micro_mutable_op_resolver.h" - -namespace tflite { - -// The magic number in the template parameter is the maximum number of ops that -// can be added to AllOpsResolver. It can be increased if needed. And most -// applications that care about the memory footprint will want to directly use -// MicroMutableOpResolver and have an application specific template parameter. -// The examples directory has sample code for this. 
-class AllOpsResolver : public MicroMutableOpResolver<128> { - public: - AllOpsResolver(); - - private: - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_ALL_OPS_RESOLVER_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cc deleted file mode 100644 index 254e194b..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.cc +++ /dev/null @@ -1,2845 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h" - -// Keep model aligned to 8 bytes to guarantee aligned 64-bit accesses. -alignas(8) const unsigned char g_keyword_scrambled_model_data[] = { - 0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x0e, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, - 0x00, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xd0, 0x6e, 0x00, 0x00, 0x60, 0x83, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0xbc, 0x6e, 0x00, 0x00, 0xac, 0x56, 0x00, 0x00, - 0x9c, 0x52, 0x00, 0x00, 0x8c, 0x51, 0x00, 0x00, 0x7c, 0x4d, 0x00, 0x00, - 0x2c, 0x4d, 0x00, 0x00, 0x1c, 0x49, 0x00, 0x00, 0x0c, 0x45, 0x00, 0x00, - 0xfc, 0x43, 0x00, 0x00, 0xec, 0x3f, 0x00, 0x00, 0x9c, 0x3f, 0x00, 0x00, - 0x8c, 0x3b, 0x00, 0x00, 0x7c, 0x37, 0x00, 0x00, 0x6c, 0x36, 0x00, 0x00, - 0x5c, 0x32, 0x00, 0x00, 0x0c, 0x32, 0x00, 0x00, 0xfc, 0x2d, 0x00, 0x00, - 0xec, 0x29, 0x00, 0x00, 0xdc, 0x28, 0x00, 0x00, 0xcc, 0x24, 0x00, 0x00, - 0x7c, 0x24, 0x00, 0x00, 0x6c, 0x22, 0x00, 0x00, 0x5c, 0x1a, 0x00, 0x00, - 0xcc, 0x19, 0x00, 0x00, 0xbc, 0x15, 0x00, 0x00, 0xac, 0x0d, 0x00, 0x00, - 0x1c, 0x0d, 0x00, 0x00, 0x0c, 0x09, 0x00, 0x00, 0xfc, 0x00, 0x00, 0x00, - 0x6c, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x2a, 0x91, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x34, 0xe1, 0x4f, 0xa1, 0x63, 0xa4, 0x62, 0xbf, 0x3e, 0x91, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0xa3, 0xb2, 0x8f, 0xee, - 0x35, 0xe6, 0xf2, 0xcc, 0x68, 0xa0, 0x33, 0xc4, 0x7d, 0x4e, 0xbb, 0xa9, - 0x10, 0x32, 0x8e, 0x3d, 0x76, 0x14, 0x1c, 0x33, 0x0e, 0x77, 0xf7, 0xc8, - 0x7b, 0x45, 0xc7, 0xdb, 0xcf, 0x87, 0xc7, 0x70, 0xa9, 0x29, 0xfd, 0x70, - 0x32, 0x96, 0x35, 0x7d, 0xe9, 0xac, 0x6d, 0x9b, 0xfd, 0xe4, 0xbc, 0x4a, - 0x57, 0xcd, 0x43, 0xcc, 0x73, 0x72, 0xdf, 0x07, 0x68, 0xc5, 0x67, 0xbd, - 0x8a, 0x91, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, - 0xb0, 0xfb, 0x5f, 0xdf, 0x0e, 0xb9, 0xa2, 0xfd, 0x66, 0x86, 0x13, 0x1b, - 0x6d, 0x1d, 0x53, 0xdb, 0x83, 0xbf, 0x44, 0x29, 0x3f, 0x93, 0xee, 0x42, - 0x9a, 0xf4, 0x31, 0x6e, 0xc3, 0x15, 
0x7e, 0x48, 0x72, 0x50, 0xc3, 0x53, - 0xef, 0x35, 0x1f, 0xc2, 0x29, 0x42, 0xb4, 0xd7, 0x4b, 0xd7, 0x98, 0x60, - 0xb9, 0x3e, 0xbb, 0x31, 0x35, 0xc3, 0xf6, 0x15, 0x7a, 0x9a, 0x2c, 0xfd, - 0xff, 0x04, 0xd9, 0x04, 0x57, 0x52, 0xae, 0x99, 0xa3, 0x95, 0xae, 0x6a, - 0x66, 0x52, 0x5f, 0x91, 0x17, 0x83, 0x0d, 0x27, 0x16, 0x02, 0x06, 0x64, - 0x80, 0x05, 0x99, 0x1c, 0x6c, 0xab, 0xb1, 0xa1, 0x0e, 0x44, 0x1f, 0x63, - 0xe9, 0xc1, 0xab, 0x8d, 0x08, 0x79, 0x56, 0xe0, 0x90, 0xa5, 0xb8, 0x3b, - 0xc4, 0x1e, 0xa5, 0x1f, 0x64, 0xe4, 0x0b, 0x72, 0x62, 0x19, 0x5f, 0x66, - 0xc0, 0x9b, 0x7b, 0xc4, 0xe5, 0x9f, 0x82, 0xa7, 0x16, 0x92, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x3e, 0x3d, 0xf4, 0x61, - 0x45, 0x2a, 0x48, 0x53, 0x1f, 0x22, 0x74, 0x65, 0xea, 0x5a, 0x00, 0x83, - 0x68, 0xf9, 0xbb, 0xa3, 0xc2, 0x1a, 0x8f, 0xe1, 0xfb, 0x76, 0x6a, 0xe9, - 0x1a, 0x0e, 0x4d, 0x32, 0xc6, 0xf3, 0x8d, 0x85, 0x54, 0xa1, 0xe9, 0xb8, - 0x35, 0xee, 0xba, 0x53, 0x40, 0xa2, 0xea, 0x7f, 0xc3, 0x99, 0x71, 0x17, - 0xdd, 0xd5, 0xfe, 0xdf, 0x5e, 0x15, 0xa0, 0x73, 0xf8, 0x78, 0x49, 0x73, - 0xcc, 0xf0, 0x18, 0x12, 0x06, 0x81, 0xd6, 0x19, 0x2c, 0xa8, 0xd7, 0x80, - 0x19, 0x19, 0xbf, 0x1e, 0x50, 0xb1, 0xfb, 0xb3, 0xa6, 0x56, 0x6f, 0x52, - 0xa6, 0xc0, 0xdd, 0x3f, 0xbb, 0x13, 0x6e, 0x04, 0xdf, 0x79, 0xca, 0x8b, - 0xa5, 0x9c, 0xa1, 0x78, 0x49, 0xca, 0xe5, 0x29, 0xbb, 0x29, 0x7c, 0x96, - 0xc6, 0x29, 0x06, 0x99, 0xec, 0x50, 0xd1, 0xe8, 0x9b, 0xb7, 0x53, 0xd2, - 0x36, 0x89, 0xb1, 0x5c, 0x38, 0xf4, 0x2f, 0xa1, 0xda, 0x6f, 0xd8, 0xd1, - 0x62, 0xd2, 0xd4, 0x97, 0xce, 0xf1, 0xbd, 0x73, 0x2d, 0x92, 0xdb, 0x62, - 0x0c, 0xb0, 0x77, 0xed, 0x32, 0x3a, 0xfc, 0x59, 0x94, 0xef, 0x2b, 0x48, - 0x60, 0xb2, 0x82, 0xa2, 0xb6, 0x51, 0xdb, 0x51, 0x47, 0x99, 0x4c, 0x50, - 0x93, 0x53, 0x9d, 0xa9, 0x3c, 0x94, 0x34, 0x9f, 0xa6, 0x3e, 0x4f, 0x87, - 0xd4, 0xa0, 0x40, 0xeb, 0x7b, 0xfa, 0x1b, 0x7d, 0x03, 0xa8, 0xf8, 0x8b, - 0xa5, 0x32, 0x3a, 0xaf, 0x7e, 0x6b, 0x25, 0x08, 0x97, 0x71, 0x8d, 0x0c, - 0x30, 0xc9, 0xa7, 0x23, 0xe3, 0x51, 0xb3, 0xf2, 0x86, 0xad, 0x12, 0xe2, - 0x79, 0x94, 0x7f, 0xf3, 0xf7, 0x88, 0x67, 0x3e, 0x8e, 0x8e, 0x04, 0x5e, - 0x4f, 0x01, 0x6f, 0x1d, 0x78, 0x42, 0x9e, 0x47, 0x81, 0xdf, 0x03, 0x39, - 0x3d, 0x9b, 0xbd, 0xb6, 0x06, 0x21, 0x82, 0xfe, 0xf2, 0x50, 0xe1, 0x14, - 0xbc, 0xe3, 0x5e, 0xe1, 0xbd, 0x8f, 0xfa, 0x35, 0x31, 0x4e, 0x66, 0xeb, - 0x67, 0x49, 0x1c, 0x07, 0x88, 0xb6, 0x22, 0x0c, 0xeb, 0xd9, 0x9f, 0x9b, - 0x8b, 0xe0, 0x9c, 0x3c, 0xf7, 0x91, 0xab, 0x98, 0x5b, 0x0e, 0x09, 0xdd, - 0xe3, 0x0b, 0x14, 0x55, 0xe9, 0xe4, 0x42, 0xd8, 0xce, 0xd7, 0xfd, 0x4c, - 0x20, 0x9f, 0x44, 0x93, 0xa6, 0x17, 0x8a, 0x68, 0x8f, 0xec, 0x62, 0xd1, - 0x97, 0x9c, 0xcc, 0xc4, 0xd9, 0x42, 0xda, 0xf1, 0x34, 0x04, 0xc6, 0xb6, - 0x0f, 0xc7, 0xe6, 0x2d, 0x26, 0x6e, 0x6f, 0x92, 0x7e, 0xd9, 0xd4, 0x40, - 0xc6, 0x70, 0xfa, 0x12, 0x2a, 0x1b, 0xbc, 0x50, 0xeb, 0x3b, 0x24, 0x96, - 0x8d, 0x7c, 0xae, 0xbe, 0xc3, 0x27, 0xce, 0x97, 0xcf, 0xcd, 0x10, 0x13, - 0x01, 0xc6, 0x48, 0x6a, 0x99, 0x38, 0x79, 0xb9, 0x1c, 0xc9, 0x09, 0xac, - 0x96, 0x8c, 0xf7, 0x82, 0x8f, 0xb8, 0x17, 0x94, 0x2c, 0x5f, 0x40, 0xcc, - 0x80, 0xf4, 0x9f, 0xaa, 0xcb, 0x83, 0x13, 0x7b, 0x3a, 0x78, 0x0a, 0x9f, - 0x79, 0x9e, 0xfc, 0x0e, 0x8f, 0x98, 0x60, 0x39, 0x86, 0x44, 0x8e, 0x4b, - 0xc4, 0xad, 0xe6, 0x98, 0x92, 0x08, 0x84, 0x48, 0x8f, 0x1d, 0x78, 0x10, - 0x9e, 0xf7, 0xb8, 0x61, 0x65, 0x46, 0xdb, 0x4a, 0xcf, 0xc5, 0x37, 0xe3, - 0x77, 0x76, 0xcf, 0x0a, 0x7e, 0x72, 0x3f, 0xe4, 0x51, 0x30, 0x28, 0x57, - 0x13, 0xfd, 0xdb, 0x7e, 0xd6, 0xa3, 0xdd, 0x64, 0xdd, 0x00, 0xd0, 0x7f, - 0xbc, 0x48, 0x1d, 0xaf, 0xde, 0x0e, 
0x45, 0xc4, 0xc9, 0xfa, 0xf6, 0xb2, - 0xb7, 0x9a, 0x42, 0x8b, 0x18, 0x08, 0xed, 0xdb, 0xa9, 0xc3, 0x32, 0xf1, - 0x9c, 0xcf, 0x16, 0x74, 0x57, 0xce, 0xe9, 0x44, 0x21, 0xdb, 0x8a, 0x45, - 0x89, 0x70, 0x41, 0x5c, 0xbf, 0x10, 0xdf, 0x83, 0x4a, 0xe4, 0x4c, 0xd8, - 0xc9, 0x2e, 0x5b, 0xa3, 0x05, 0xed, 0x73, 0xb1, 0xb0, 0xb7, 0xc4, 0xd7, - 0x0d, 0xea, 0xf6, 0xb4, 0xc1, 0x5e, 0x12, 0x54, 0x30, 0x73, 0x5c, 0x93, - 0xd9, 0xf7, 0xc9, 0x24, 0x43, 0x8f, 0x4f, 0x8e, 0x94, 0x95, 0xb6, 0xfd, - 0xa3, 0x14, 0x42, 0x50, 0xb8, 0x66, 0xfb, 0xc4, 0xed, 0x72, 0xcf, 0x7b, - 0xa9, 0x73, 0xeb, 0xc4, 0x4a, 0x05, 0xea, 0xb4, 0x47, 0xca, 0x21, 0x56, - 0x28, 0xa8, 0x87, 0xb8, 0x87, 0x0b, 0xe3, 0x8d, 0xfd, 0x70, 0xf7, 0x33, - 0x76, 0xf0, 0x3d, 0xa4, 0x3b, 0x83, 0xab, 0x14, 0x01, 0xe1, 0xb0, 0xa9, - 0x44, 0xe8, 0xd7, 0x50, 0x26, 0x0b, 0xbb, 0x2d, 0x57, 0x39, 0x82, 0x7c, - 0x71, 0xd8, 0x12, 0xaf, 0xf3, 0x9f, 0x46, 0xbd, 0x62, 0xd6, 0x61, 0xf5, - 0xb7, 0x04, 0x94, 0xbf, 0x87, 0xea, 0xc4, 0xc4, 0x33, 0xcf, 0x36, 0x3b, - 0x4f, 0xc7, 0x71, 0xf1, 0x98, 0xe6, 0xb0, 0x96, 0x25, 0xd7, 0xac, 0x75, - 0xfc, 0x92, 0xe0, 0x69, 0x72, 0x37, 0x8d, 0x40, 0x31, 0xaa, 0x2c, 0x86, - 0xfb, 0x95, 0x3f, 0x9c, 0x23, 0xd4, 0x39, 0x99, 0xff, 0xea, 0x95, 0x79, - 0xb9, 0x2e, 0xb0, 0x33, 0xf1, 0xe8, 0xd0, 0x42, 0xb5, 0x70, 0x5c, 0xca, - 0x69, 0x48, 0x28, 0x23, 0x58, 0xb4, 0x07, 0xfc, 0x3e, 0x15, 0x29, 0x00, - 0xa9, 0x22, 0x44, 0x70, 0xd0, 0xc7, 0x01, 0x0d, 0x3e, 0xfc, 0x57, 0xb7, - 0x54, 0x3a, 0xc3, 0x43, 0xd6, 0x2f, 0x55, 0x09, 0x52, 0x4a, 0x6b, 0x8e, - 0x4c, 0x82, 0xbb, 0x4e, 0x3e, 0x38, 0xe1, 0x9e, 0x72, 0x83, 0xec, 0x40, - 0xf5, 0xf7, 0x0e, 0x3c, 0x24, 0xed, 0xda, 0xf2, 0x39, 0x6c, 0xad, 0xeb, - 0xff, 0xfb, 0x4a, 0x38, 0x50, 0x49, 0x28, 0x3d, 0x05, 0xb2, 0x98, 0x44, - 0x2b, 0x61, 0xa2, 0x9b, 0x3a, 0x3c, 0xad, 0xd9, 0x8c, 0xef, 0x3c, 0x72, - 0x50, 0x74, 0x13, 0x80, 0xc4, 0x7e, 0x6e, 0xf3, 0xc9, 0xdf, 0x63, 0xf6, - 0x41, 0xb2, 0x08, 0x78, 0x9b, 0x7c, 0xa9, 0x13, 0xd1, 0x21, 0xe7, 0x5e, - 0x6a, 0x0d, 0x64, 0xf7, 0x52, 0x75, 0xf2, 0x80, 0x69, 0xbe, 0x43, 0xf8, - 0xd4, 0xad, 0x49, 0xfc, 0x97, 0x76, 0x1c, 0xb6, 0x43, 0x9e, 0xcb, 0x45, - 0x4d, 0x75, 0x07, 0xae, 0xdb, 0xbf, 0xf5, 0x8a, 0xeb, 0xb9, 0x6b, 0x12, - 0x06, 0xbf, 0x94, 0xad, 0x77, 0x29, 0xb1, 0xae, 0x24, 0x9b, 0x4d, 0xdc, - 0xe1, 0x5e, 0xd7, 0x57, 0xec, 0xd1, 0xd8, 0xad, 0xf0, 0x06, 0x08, 0x43, - 0x33, 0x99, 0xd2, 0x04, 0xfc, 0xc8, 0xf6, 0x53, 0x3d, 0x73, 0xd4, 0x36, - 0xd3, 0x8e, 0x4a, 0xcd, 0xb1, 0xe9, 0xcb, 0x3a, 0x5f, 0x54, 0xbc, 0xde, - 0x16, 0xa2, 0x85, 0xde, 0x35, 0x27, 0x99, 0x32, 0x4f, 0xb9, 0x2c, 0x16, - 0xa2, 0x6e, 0xae, 0x75, 0x60, 0x77, 0xe9, 0x08, 0x0f, 0x08, 0xc4, 0xd0, - 0x62, 0xc7, 0xd2, 0x1f, 0x3b, 0x29, 0xdd, 0xb7, 0xea, 0xa3, 0x58, 0xaf, - 0x4c, 0x05, 0xd2, 0x82, 0x6a, 0xe0, 0xc4, 0xe9, 0x70, 0x7e, 0xf2, 0xca, - 0x82, 0x6a, 0xae, 0xc1, 0x9a, 0x42, 0x5d, 0x46, 0x4a, 0xb7, 0x8f, 0x4d, - 0x33, 0xfe, 0x6f, 0x47, 0xb5, 0x49, 0xb3, 0x89, 0x51, 0x31, 0x74, 0x68, - 0x14, 0xda, 0x0a, 0x41, 0x3d, 0x1f, 0x8e, 0x30, 0x8c, 0x77, 0xd1, 0xa9, - 0x36, 0x41, 0x78, 0x34, 0xb7, 0x7e, 0x4e, 0x7a, 0x77, 0x12, 0x43, 0x97, - 0x43, 0xba, 0xd6, 0x28, 0x14, 0x2a, 0x9f, 0x98, 0xb4, 0x39, 0x08, 0x5c, - 0xb7, 0xb8, 0x03, 0x63, 0x62, 0x68, 0xc6, 0x9a, 0x4d, 0xf5, 0xdc, 0x7c, - 0x0f, 0x7e, 0x77, 0xdc, 0x85, 0x53, 0x31, 0x8c, 0x53, 0x8b, 0x27, 0xc4, - 0xb7, 0x3d, 0xd0, 0x94, 0x9b, 0x7e, 0x59, 0x59, 0x03, 0x09, 0x8c, 0x30, - 0x70, 0x7d, 0x9c, 0x73, 0x89, 0x6c, 0x5f, 0xbf, 0xf9, 0xc7, 0x72, 0x76, - 0x12, 0x98, 0xe3, 0xbe, 0xc3, 0x67, 0xdf, 0xa1, 0x76, 0xa3, 0xec, 0x44, - 0x30, 0x70, 0x2f, 0x6a, 0x86, 0x28, 
0xb9, 0x9d, 0x7f, 0x93, 0xf2, 0x4a, - 0x34, 0x48, 0x1f, 0x2e, 0x2e, 0x95, 0x88, 0xdb, 0x1f, 0x2c, 0x19, 0x46, - 0x2e, 0x91, 0x5f, 0x81, 0x0d, 0x08, 0x9d, 0x03, 0x0b, 0xaf, 0x59, 0x0a, - 0x41, 0xad, 0x4d, 0x6c, 0x09, 0x0e, 0x9f, 0xd1, 0xc4, 0xdb, 0xac, 0x59, - 0x27, 0x04, 0x1c, 0x73, 0xe9, 0xf3, 0xe8, 0x54, 0xd9, 0x11, 0x31, 0xb2, - 0xed, 0x2d, 0x8c, 0xeb, 0x99, 0x26, 0x48, 0x9e, 0xac, 0x88, 0x96, 0xcb, - 0x19, 0x49, 0xfa, 0x4a, 0x82, 0xd5, 0x5d, 0xb8, 0x0f, 0x22, 0x3f, 0xb6, - 0x5c, 0x02, 0x2a, 0xb9, 0xd9, 0xfe, 0x4d, 0x9d, 0xdb, 0x85, 0x90, 0x19, - 0x7f, 0x1a, 0x44, 0xa3, 0x74, 0x68, 0xbf, 0xa2, 0x3b, 0xb4, 0x3b, 0xeb, - 0xab, 0x99, 0xc2, 0x46, 0x50, 0x7e, 0xec, 0xa9, 0xb4, 0x86, 0xfa, 0x50, - 0xcb, 0x71, 0x7e, 0x75, 0xa5, 0xca, 0xa6, 0x2f, 0x40, 0x1d, 0xa1, 0x4a, - 0x5c, 0x91, 0xd7, 0x2a, 0xa6, 0x17, 0x11, 0x4d, 0x19, 0x2b, 0xb3, 0x0f, - 0xf0, 0xb3, 0x06, 0x70, 0x51, 0x5c, 0x52, 0x8c, 0xdf, 0xe3, 0x19, 0x92, - 0x08, 0x40, 0xa2, 0xb4, 0xc0, 0xf2, 0xe8, 0x44, 0xcc, 0x36, 0xaa, 0xf9, - 0xf8, 0xfc, 0x2d, 0x83, 0x79, 0xc6, 0x58, 0xc1, 0xdf, 0x32, 0xb7, 0xde, - 0x0f, 0x3e, 0xc0, 0xa8, 0x7e, 0xeb, 0xf2, 0x30, 0x16, 0xdf, 0x38, 0xcb, - 0x69, 0xd9, 0x44, 0x0d, 0x44, 0xf4, 0x45, 0x9c, 0x81, 0xc8, 0xe7, 0x06, - 0xae, 0x95, 0xaf, 0xff, 0x17, 0x3b, 0x1c, 0x3f, 0xda, 0xa5, 0xf8, 0xfd, - 0x9c, 0xf1, 0x0a, 0xca, 0xda, 0xc0, 0xfa, 0x02, 0xc4, 0xce, 0x78, 0xfb, - 0x35, 0x8c, 0xfe, 0x55, 0xad, 0x0d, 0x9b, 0xeb, 0x10, 0xf1, 0x7b, 0xb1, - 0x09, 0xf8, 0xef, 0xfc, 0xde, 0x7a, 0x69, 0x74, 0x76, 0xef, 0x91, 0x64, - 0x33, 0xc4, 0x08, 0x15, 0x73, 0x85, 0x56, 0xae, 0x9c, 0xf6, 0xdd, 0x55, - 0x19, 0x96, 0xe6, 0x41, 0x12, 0xc9, 0x87, 0x91, 0x9e, 0xc6, 0x18, 0xe8, - 0xbf, 0xa0, 0x59, 0xfd, 0x20, 0xab, 0xb5, 0xcf, 0x0f, 0x6e, 0x30, 0xd3, - 0xc5, 0x70, 0xf2, 0x50, 0xa4, 0x2a, 0xdf, 0xb0, 0x45, 0xfc, 0x82, 0x1a, - 0x3b, 0xfe, 0x0c, 0xad, 0x41, 0x95, 0xf1, 0xd6, 0x85, 0xa2, 0xc9, 0xff, - 0xbe, 0x3a, 0x64, 0x70, 0x43, 0xc0, 0xc5, 0xc8, 0x80, 0x11, 0x0d, 0x20, - 0xcd, 0xf2, 0xa2, 0xbb, 0x43, 0x68, 0x0e, 0xf4, 0x01, 0xb3, 0x73, 0x79, - 0x9f, 0x68, 0x41, 0x63, 0x3e, 0xda, 0xf9, 0xf4, 0x23, 0x57, 0x97, 0x84, - 0x99, 0xe8, 0x5e, 0xdb, 0xaa, 0x24, 0xab, 0x9c, 0x40, 0x83, 0xf9, 0x3f, - 0x4f, 0x5a, 0x53, 0xa6, 0xf1, 0xe8, 0x95, 0xcf, 0xcb, 0x50, 0x13, 0x51, - 0xa7, 0x8c, 0x71, 0x1d, 0xff, 0xcc, 0x66, 0xab, 0xff, 0xca, 0xc5, 0xc3, - 0x73, 0x45, 0xb7, 0x21, 0x1d, 0x65, 0x7a, 0xe5, 0x1f, 0x3f, 0x1a, 0x58, - 0x23, 0x28, 0xc8, 0xf3, 0xbf, 0x98, 0x25, 0xc0, 0x83, 0x68, 0xf0, 0x62, - 0x63, 0x90, 0xcf, 0x1f, 0x20, 0xb8, 0x04, 0x5c, 0xc4, 0x80, 0x5b, 0xf4, - 0x6d, 0xdc, 0xe9, 0xac, 0xd8, 0x13, 0x3b, 0x42, 0xf8, 0x4e, 0xa2, 0x1c, - 0xce, 0x3f, 0x8d, 0x15, 0xd3, 0x87, 0x1b, 0x44, 0x79, 0x52, 0x34, 0x4b, - 0x63, 0x4d, 0xbf, 0x95, 0xec, 0xae, 0xf9, 0xc6, 0x7b, 0x7b, 0x85, 0x8c, - 0x4f, 0x20, 0x58, 0x9d, 0x48, 0x03, 0x2f, 0x77, 0x2e, 0x8b, 0x6f, 0x66, - 0x76, 0xb9, 0xb8, 0xb7, 0x34, 0x5a, 0x63, 0x06, 0x85, 0x82, 0x5f, 0x23, - 0x8f, 0x8d, 0x0c, 0x92, 0x3b, 0xd2, 0x8a, 0x1b, 0x39, 0xee, 0x6a, 0xbc, - 0xf6, 0x94, 0x2a, 0xc6, 0x73, 0xa6, 0x99, 0x98, 0xdc, 0x96, 0xd7, 0xc1, - 0xfe, 0x9b, 0xc8, 0xfb, 0x86, 0x5a, 0xad, 0xce, 0xf8, 0xd5, 0x32, 0x62, - 0x96, 0x63, 0xaf, 0x4c, 0x4a, 0xae, 0xec, 0x26, 0x3d, 0x84, 0x69, 0x50, - 0x5f, 0x37, 0x9b, 0x29, 0xac, 0x15, 0x76, 0x3d, 0x33, 0x96, 0x06, 0xde, - 0xc1, 0x6d, 0xa2, 0xc7, 0xc3, 0x8a, 0x20, 0x2e, 0xf7, 0x08, 0x55, 0x83, - 0x23, 0x9c, 0x23, 0x2d, 0x3a, 0xa1, 0x32, 0xbc, 0x47, 0x48, 0xd5, 0x6a, - 0x71, 0xb9, 0xcc, 0x2d, 0x99, 0xa0, 0x37, 0x07, 0x46, 0x45, 0xbe, 0xf0, - 0x27, 0x5a, 0x25, 0x72, 0x58, 0x47, 
0x6d, 0xbf, 0x23, 0xdc, 0x48, 0x44, - 0x45, 0x95, 0xb1, 0x62, 0xf1, 0x7e, 0x4c, 0x95, 0x1c, 0xb4, 0x17, 0x8b, - 0x59, 0x2e, 0xf3, 0x4f, 0x45, 0x3b, 0x5d, 0x67, 0x92, 0x52, 0xd8, 0xc1, - 0x91, 0xfa, 0x53, 0xaa, 0x87, 0xc0, 0xa7, 0xb0, 0x9f, 0x10, 0xe8, 0xac, - 0x45, 0x52, 0xbb, 0x17, 0xee, 0xf6, 0x18, 0xbe, 0x02, 0x70, 0xce, 0x79, - 0x66, 0x72, 0xf9, 0xf6, 0xca, 0x66, 0xff, 0xa4, 0x9a, 0xd9, 0xb7, 0x07, - 0xa9, 0xc1, 0x23, 0x7e, 0x7b, 0x9c, 0xe3, 0x02, 0x7a, 0xcc, 0xa3, 0x67, - 0xb7, 0xb0, 0x37, 0xba, 0xae, 0x12, 0xda, 0x48, 0x6e, 0x7f, 0xde, 0x5f, - 0x75, 0x15, 0xca, 0xd2, 0x46, 0xdd, 0xb0, 0x82, 0xbf, 0x6d, 0xe9, 0x51, - 0x66, 0xa5, 0x9e, 0x0c, 0xd5, 0x03, 0xbd, 0x97, 0x0e, 0x1b, 0x88, 0xf6, - 0x61, 0x5a, 0x8b, 0xe0, 0xdd, 0x3e, 0x59, 0x4c, 0x35, 0xfd, 0xb0, 0x3b, - 0x79, 0x8c, 0x1c, 0x96, 0x97, 0x35, 0x62, 0x36, 0x62, 0x4c, 0x4b, 0x46, - 0xb1, 0x21, 0xf7, 0xf0, 0x34, 0xdc, 0xd9, 0x9f, 0xf8, 0x53, 0x7d, 0xca, - 0xbc, 0x4d, 0xaf, 0xf4, 0xb7, 0x2f, 0xa7, 0x5d, 0x18, 0xf9, 0x3b, 0xa9, - 0xb0, 0xbb, 0xdf, 0xfa, 0x28, 0x2b, 0x58, 0xce, 0x46, 0x01, 0x3f, 0x76, - 0xf2, 0x39, 0x45, 0x8b, 0x3c, 0xda, 0x62, 0x2b, 0x6b, 0xe1, 0x5f, 0x14, - 0xfc, 0x79, 0x17, 0x2d, 0xe2, 0xe5, 0x8c, 0xc5, 0xde, 0x91, 0xfd, 0xf5, - 0x6d, 0x9b, 0x6b, 0xbb, 0xb0, 0x13, 0xae, 0xbe, 0x1e, 0xa8, 0x8f, 0x3c, - 0xfd, 0x24, 0xbe, 0xb8, 0x39, 0x80, 0x03, 0x06, 0x8b, 0xff, 0xca, 0x90, - 0x88, 0x0f, 0x45, 0xc4, 0xeb, 0x50, 0x52, 0xf5, 0x00, 0x8c, 0x16, 0x9d, - 0x26, 0xaa, 0xec, 0xb1, 0x44, 0xd6, 0xfe, 0x67, 0xa3, 0xc1, 0xec, 0x4a, - 0x12, 0xa6, 0x7c, 0x7c, 0xc3, 0x46, 0x1c, 0x64, 0x61, 0x67, 0xec, 0xce, - 0x1e, 0xa2, 0xb4, 0xdd, 0x6e, 0x7f, 0x02, 0x14, 0xf4, 0x1c, 0x17, 0xa7, - 0x31, 0x9f, 0xc2, 0xc6, 0xc0, 0x21, 0x41, 0x88, 0x61, 0xd8, 0xca, 0x06, - 0xa5, 0xe4, 0xef, 0xa4, 0xaa, 0x4d, 0xa3, 0xad, 0x5f, 0xd4, 0x0c, 0x6b, - 0x14, 0x38, 0x2e, 0xe8, 0x87, 0x5a, 0x68, 0x10, 0x51, 0xd8, 0xbb, 0xa6, - 0xd9, 0xdc, 0xd3, 0x7f, 0x1f, 0xea, 0xa8, 0xcc, 0x3f, 0x43, 0xa4, 0x04, - 0x95, 0xb4, 0xde, 0x2f, 0x07, 0x5d, 0x91, 0x1c, 0x8e, 0xc3, 0xbc, 0xaa, - 0x46, 0x8a, 0xa8, 0x42, 0xa7, 0x2c, 0x0f, 0x1f, 0xb3, 0xe2, 0x8a, 0x0b, - 0xa0, 0x3f, 0xfb, 0x87, 0x9e, 0x42, 0xa5, 0x60, 0xce, 0x5a, 0x54, 0x91, - 0x26, 0x51, 0xea, 0x81, 0x6f, 0xf1, 0x54, 0x93, 0xe7, 0xa0, 0xf8, 0x64, - 0xab, 0x1d, 0x0d, 0x9d, 0x64, 0x6a, 0xd5, 0x19, 0x03, 0xbb, 0x94, 0x7f, - 0x0a, 0xb8, 0x6b, 0x87, 0xc3, 0x1a, 0x38, 0xe5, 0xe8, 0xba, 0x13, 0x17, - 0xeb, 0x13, 0xcc, 0xac, 0xcb, 0x1f, 0x96, 0x4c, 0x3b, 0x18, 0xfb, 0xe8, - 0x5c, 0x54, 0xce, 0x1a, 0x91, 0x44, 0xf5, 0x49, 0x6c, 0x38, 0x2a, 0x92, - 0x8a, 0x0d, 0x3d, 0x08, 0xc2, 0x5f, 0x6c, 0xac, 0x48, 0xb3, 0xdc, 0x2e, - 0xa6, 0x5a, 0xa8, 0xee, 0x22, 0x9a, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0x96, 0xc5, 0x3a, 0x4e, 0x42, 0x7d, 0x27, 0xce, - 0x44, 0x84, 0xf1, 0x67, 0x8c, 0xc5, 0xdd, 0x75, 0x3b, 0x8a, 0xed, 0x2e, - 0x29, 0x62, 0x7b, 0xb0, 0xe6, 0xa3, 0xb4, 0x61, 0x73, 0x10, 0xff, 0x0e, - 0x0c, 0x98, 0x74, 0xef, 0xbb, 0xc4, 0xca, 0x03, 0x88, 0xa4, 0x96, 0x61, - 0xef, 0x36, 0x6d, 0xa2, 0xb1, 0xc8, 0xf0, 0xac, 0xf1, 0xb2, 0x08, 0x56, - 0xc7, 0x99, 0xcf, 0xae, 0x0a, 0x37, 0x85, 0x60, 0x78, 0x2d, 0x14, 0xda, - 0xb1, 0xa7, 0x00, 0xb6, 0x00, 0x04, 0x76, 0x80, 0x0e, 0x9f, 0x2a, 0x30, - 0x8b, 0x85, 0xd9, 0xc1, 0xaf, 0xee, 0x27, 0x80, 0x20, 0xed, 0xef, 0x25, - 0x5c, 0x98, 0x6b, 0xcc, 0xf8, 0x72, 0xfb, 0x3f, 0x13, 0xe6, 0x9b, 0x47, - 0xee, 0xa1, 0x18, 0x55, 0xa0, 0x68, 0xbe, 0xd4, 0x21, 0x59, 0x72, 0xa8, - 0xa4, 0xd2, 0x33, 0x57, 0x50, 0xfc, 0x6b, 0xa8, 0x49, 0x1b, 0x74, 0xdb, - 0x5a, 0x16, 0xb8, 0x52, 0x0c, 0xda, 
0xa0, 0xa3, 0xff, 0x33, 0x56, 0x82, - 0x0f, 0x0a, 0x90, 0x82, 0xee, 0xf1, 0x1b, 0xb3, 0x05, 0x44, 0x39, 0x01, - 0xf7, 0x1e, 0xff, 0xcb, 0xea, 0xd0, 0xb6, 0x20, 0xbc, 0x84, 0xb1, 0xf9, - 0xa2, 0xc1, 0x56, 0xe6, 0xfa, 0x47, 0xc9, 0xfd, 0x45, 0x77, 0x51, 0x8e, - 0x01, 0xe4, 0x17, 0x20, 0x6f, 0x99, 0xe3, 0x90, 0x2f, 0xcc, 0xaf, 0xd9, - 0x61, 0x32, 0x91, 0x62, 0x58, 0xf4, 0x98, 0xf5, 0xf4, 0xeb, 0x13, 0xeb, - 0xdc, 0x8a, 0xac, 0xb2, 0x9e, 0xcf, 0xe7, 0xa7, 0xd4, 0x97, 0x22, 0x12, - 0x08, 0x10, 0x6d, 0x40, 0xea, 0x26, 0xea, 0x42, 0x29, 0x6e, 0x75, 0x62, - 0x47, 0x08, 0x17, 0xa8, 0x69, 0x0f, 0xf7, 0x35, 0x59, 0x23, 0x86, 0x83, - 0xfd, 0xb5, 0x61, 0x98, 0x9c, 0x4d, 0x37, 0xda, 0x9f, 0xfc, 0xfb, 0x16, - 0xb7, 0x6c, 0x52, 0xee, 0xa8, 0x9c, 0x3e, 0x93, 0x43, 0xc5, 0x2b, 0xd4, - 0xd0, 0x9f, 0x69, 0x2c, 0xc9, 0x1f, 0x2e, 0xdf, 0x5b, 0xe6, 0xc6, 0x5f, - 0x71, 0xd1, 0xd7, 0xb2, 0x8f, 0x3a, 0xba, 0x60, 0x75, 0x3d, 0x34, 0x41, - 0x43, 0x9b, 0x13, 0xc0, 0x3b, 0x30, 0xc5, 0xe9, 0x84, 0x81, 0xde, 0x85, - 0x4e, 0x65, 0x7b, 0x21, 0x37, 0xb8, 0xef, 0x24, 0x19, 0xaa, 0x26, 0x0c, - 0x27, 0xa7, 0xd9, 0x29, 0x47, 0x1a, 0x15, 0x42, 0x1e, 0x30, 0x79, 0x79, - 0x96, 0x09, 0x62, 0x26, 0xad, 0x98, 0x8b, 0xcb, 0x3d, 0xeb, 0x66, 0x83, - 0x77, 0xd9, 0x79, 0x4d, 0x05, 0x81, 0x72, 0xe9, 0xe0, 0x6f, 0x13, 0x00, - 0x7e, 0xa3, 0x92, 0x82, 0x1c, 0x90, 0x83, 0x4b, 0x15, 0x97, 0x0f, 0x92, - 0xe2, 0xd3, 0x3d, 0xd7, 0x6c, 0xb9, 0x60, 0x9a, 0x23, 0x52, 0xbe, 0x59, - 0xc9, 0x36, 0x9e, 0xf7, 0x77, 0x09, 0x79, 0x01, 0xcc, 0xec, 0x17, 0xd1, - 0x74, 0xbc, 0x58, 0x65, 0x45, 0x3c, 0x86, 0xf1, 0xbc, 0xbd, 0x95, 0x54, - 0x46, 0x45, 0x7b, 0x4c, 0xa2, 0xea, 0x2a, 0x6e, 0xa8, 0xd1, 0x66, 0x03, - 0xb2, 0x6a, 0xe0, 0xd3, 0x07, 0x8d, 0xe0, 0x09, 0x81, 0x42, 0xe3, 0x97, - 0xc4, 0xe7, 0x37, 0xc5, 0x82, 0xcf, 0xb1, 0xec, 0xba, 0xbd, 0xf4, 0xb6, - 0x41, 0xb2, 0xb8, 0xa6, 0x3a, 0x85, 0x4b, 0x4f, 0x46, 0x48, 0xe9, 0x9b, - 0x72, 0xf5, 0xb0, 0x64, 0x66, 0x75, 0x42, 0xb4, 0x00, 0xbe, 0x11, 0x6d, - 0x86, 0x93, 0x07, 0x50, 0xa7, 0xef, 0x55, 0x42, 0xcf, 0xe8, 0x61, 0xd0, - 0x9b, 0x11, 0x84, 0x8c, 0x74, 0xe4, 0xb8, 0x3f, 0x48, 0xb3, 0x61, 0xe3, - 0xea, 0x66, 0x86, 0x94, 0x95, 0x12, 0x77, 0x26, 0x75, 0x30, 0xb5, 0xd3, - 0x7a, 0xad, 0x2d, 0x58, 0x46, 0x1b, 0x4b, 0xd9, 0x2d, 0x1e, 0x0b, 0xff, - 0xd7, 0x03, 0x56, 0x3b, 0xbd, 0x65, 0xb0, 0xf9, 0xfe, 0x43, 0x1c, 0x9c, - 0x18, 0x82, 0x78, 0x5e, 0x06, 0x02, 0x21, 0x70, 0xb2, 0x7f, 0xb5, 0x63, - 0x71, 0x85, 0x95, 0x79, 0xae, 0x1e, 0xc6, 0x62, 0x7a, 0x7c, 0x63, 0x46, - 0x70, 0x1c, 0x58, 0x72, 0x1d, 0xde, 0xca, 0xb4, 0xfc, 0xc8, 0x56, 0x38, - 0x32, 0xf4, 0x0b, 0x56, 0x87, 0x6b, 0x5b, 0x53, 0xd2, 0x2c, 0x35, 0xef, - 0x5b, 0x33, 0x59, 0x13, 0x76, 0x82, 0x30, 0x80, 0x23, 0x10, 0x07, 0x4c, - 0x3f, 0xac, 0x9c, 0x58, 0x2d, 0x04, 0xe6, 0x6a, 0xd3, 0x5c, 0xf9, 0xb6, - 0x59, 0x4e, 0x85, 0xfe, 0x01, 0x71, 0xf0, 0xf7, 0xf2, 0x1f, 0x46, 0xd5, - 0x20, 0x3c, 0x9b, 0xc2, 0x1e, 0x73, 0x1c, 0x56, 0x9c, 0x76, 0x8c, 0x12, - 0x95, 0x51, 0xd4, 0x6f, 0x5b, 0x3a, 0xa7, 0x5f, 0xa7, 0xe4, 0xfa, 0xb7, - 0x1a, 0xdd, 0xb6, 0x4c, 0x01, 0x02, 0xae, 0x9c, 0x02, 0x0d, 0x66, 0x2f, - 0x40, 0x87, 0xa1, 0xbc, 0xf3, 0xde, 0xf4, 0xdb, 0x65, 0xee, 0xcc, 0xca, - 0xe1, 0x7a, 0xa2, 0xf4, 0xf7, 0xf5, 0x7c, 0x2a, 0x3f, 0xa4, 0x67, 0xbb, - 0x07, 0x50, 0x7a, 0x29, 0x8a, 0xcf, 0x2c, 0x7a, 0x0e, 0x0d, 0xc7, 0x95, - 0x8b, 0xf4, 0xe2, 0x50, 0xe1, 0xc1, 0x40, 0x16, 0x99, 0x5c, 0x72, 0xe7, - 0xe4, 0x01, 0xeb, 0x29, 0x6a, 0x99, 0xf2, 0x67, 0x23, 0x46, 0x1f, 0xaa, - 0xea, 0xc1, 0x51, 0x30, 0xeb, 0x7d, 0x34, 0x52, 0x91, 0x37, 0x2d, 0xc6, - 0x5c, 0x3a, 0x7c, 0x54, 0xc0, 0x79, 
-    ... [removed lines continue: several thousand further `0x..` byte constants from the deleted hex byte-array initializer, terminated by the recurring `0xff, 0xff, 0x04, 0x00, ...` section markers] ...
0xb6, 0x62, 0x61, 0xf7, 0xc0, 0x18, - 0x0f, 0x20, 0x79, 0x13, 0x5c, 0xe8, 0xca, 0x04, 0x29, 0x5f, 0x70, 0x4d, - 0x88, 0xa2, 0x43, 0x20, 0x57, 0x33, 0x04, 0x74, 0x8e, 0x7c, 0x89, 0xd4, - 0x56, 0x8f, 0x93, 0x86, 0x81, 0x6c, 0x11, 0xfc, 0x32, 0x0e, 0xb0, 0x3e, - 0xe5, 0x13, 0xbf, 0x76, 0x62, 0xcc, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0x0e, 0xf8, 0x8f, 0xde, 0xfd, 0xfd, 0xcf, 0xd1, - 0x6f, 0x9f, 0xf2, 0xb6, 0xb6, 0x59, 0xb2, 0x73, 0x1c, 0x3c, 0x0d, 0xb0, - 0x4d, 0xb8, 0x96, 0xc6, 0xeb, 0xe5, 0xf8, 0x0d, 0x3e, 0xd7, 0x0c, 0xbd, - 0x9c, 0xaa, 0xd5, 0x1c, 0x19, 0x9a, 0x4c, 0x8e, 0xfa, 0xac, 0x68, 0x74, - 0x16, 0x06, 0xb5, 0x49, 0xe7, 0xd5, 0x6f, 0x4f, 0xcc, 0xd9, 0x02, 0x74, - 0xd6, 0x08, 0x73, 0x7c, 0xa9, 0xfa, 0x3e, 0x50, 0x87, 0xf7, 0xfb, 0xa6, - 0x94, 0xdc, 0xb1, 0x40, 0xec, 0xa7, 0xa9, 0x39, 0xff, 0x40, 0x4a, 0x97, - 0x9b, 0xcc, 0x57, 0x66, 0x68, 0xd6, 0xa8, 0x4d, 0x13, 0x06, 0x0e, 0x03, - 0xc4, 0xdf, 0x7a, 0xe4, 0x2f, 0x0e, 0xd7, 0x54, 0xe0, 0xbd, 0x93, 0xeb, - 0x82, 0xd8, 0x05, 0x2d, 0xa2, 0xf0, 0x4e, 0xd0, 0xf9, 0x3e, 0x3e, 0x6b, - 0x3d, 0x08, 0x39, 0x4e, 0x35, 0x13, 0x7b, 0x3b, 0x39, 0x2c, 0x47, 0x2c, - 0x61, 0x9f, 0xfd, 0x59, 0x88, 0x5f, 0x65, 0x08, 0xa9, 0x66, 0xec, 0xb5, - 0x21, 0xf3, 0xe9, 0xba, 0x11, 0x63, 0x24, 0x6c, 0xf4, 0x50, 0x3a, 0xe5, - 0x0c, 0x06, 0x39, 0x69, 0x2f, 0xca, 0x0f, 0x48, 0xbe, 0x95, 0x7d, 0x13, - 0x3d, 0xa5, 0x75, 0x69, 0x85, 0xc8, 0xb3, 0x72, 0x72, 0x3c, 0x4f, 0x96, - 0xe7, 0xb7, 0xbd, 0xe7, 0x76, 0xba, 0xac, 0xc0, 0x07, 0x4d, 0xc1, 0xed, - 0xb9, 0xf0, 0x91, 0x2e, 0x36, 0xb7, 0x5b, 0x1c, 0xb7, 0xd6, 0xb3, 0x45, - 0x7d, 0x0a, 0xf5, 0x43, 0xdd, 0x7a, 0x8b, 0x4e, 0x18, 0xf2, 0xf3, 0x19, - 0xcd, 0x4a, 0xda, 0x3c, 0x1b, 0x05, 0x27, 0x67, 0x43, 0xa9, 0x8e, 0xe7, - 0x4a, 0x95, 0xa9, 0xad, 0x6c, 0x8c, 0xb2, 0x2e, 0x12, 0xcb, 0xf3, 0xeb, - 0x65, 0x26, 0xf4, 0x3e, 0x86, 0xee, 0x7e, 0xd9, 0xba, 0xce, 0x8d, 0x15, - 0x3e, 0xa8, 0x40, 0x59, 0x1d, 0x27, 0x78, 0x75, 0xf0, 0xf9, 0x33, 0xb5, - 0x32, 0xa9, 0x66, 0xe6, 0x2e, 0x2e, 0x3d, 0xf5, 0x4a, 0xf0, 0x97, 0x2d, - 0xe7, 0x43, 0x85, 0x43, 0x61, 0x25, 0x15, 0x13, 0x9e, 0x8e, 0xf6, 0x78, - 0xe8, 0x67, 0xba, 0xc2, 0x6d, 0xda, 0x46, 0x25, 0x76, 0xd9, 0x9b, 0x69, - 0x95, 0x4b, 0x50, 0x8c, 0xb7, 0x36, 0x49, 0xbc, 0xd7, 0x39, 0x69, 0xb9, - 0xc1, 0x5f, 0x5f, 0xcc, 0x83, 0x4c, 0x16, 0xb8, 0x0c, 0x85, 0xf1, 0xa4, - 0x57, 0x6c, 0x22, 0x1f, 0x60, 0x0c, 0xff, 0xb6, 0xc9, 0xf7, 0x21, 0x2d, - 0x35, 0x78, 0x31, 0x79, 0xd0, 0x6d, 0x61, 0xec, 0x61, 0x04, 0x75, 0x5c, - 0x06, 0xc3, 0x53, 0x1b, 0xb5, 0xdc, 0x23, 0xb9, 0xd9, 0x07, 0xd1, 0xd0, - 0xb3, 0xa5, 0xab, 0xd9, 0xbe, 0xb7, 0xdc, 0xae, 0x3f, 0x3e, 0xd7, 0x2a, - 0x79, 0x3f, 0x9c, 0x27, 0x81, 0x8d, 0x61, 0xe8, 0x46, 0x8f, 0x05, 0xf4, - 0x9c, 0x30, 0x35, 0x9a, 0x2f, 0x62, 0x84, 0x7c, 0xa5, 0x95, 0x68, 0x34, - 0xe6, 0xf0, 0xb9, 0x42, 0xd4, 0x37, 0xc6, 0xd2, 0x35, 0x1f, 0x7b, 0xe0, - 0xa6, 0x92, 0xcf, 0xf7, 0x0f, 0x08, 0x10, 0x79, 0xbd, 0xa8, 0x7c, 0x4e, - 0xef, 0xf1, 0x01, 0x8d, 0x1b, 0x0c, 0x98, 0x46, 0x28, 0xdc, 0xd5, 0xa8, - 0xcf, 0x67, 0x7d, 0x87, 0x2a, 0x8f, 0xdd, 0x52, 0x43, 0x5a, 0x55, 0x80, - 0x88, 0xa6, 0xcd, 0x9c, 0x5d, 0x36, 0xae, 0xef, 0x61, 0x43, 0xec, 0xf0, - 0x7f, 0x92, 0x21, 0x1f, 0xa2, 0xa3, 0x76, 0x0e, 0x5d, 0xf3, 0xa7, 0xe7, - 0x7d, 0xb0, 0x2c, 0x94, 0x36, 0x95, 0x34, 0x4e, 0x04, 0xfb, 0x51, 0xf9, - 0xe6, 0x7e, 0x56, 0x7a, 0x59, 0xce, 0x0a, 0x45, 0x7e, 0xeb, 0xc4, 0xbc, - 0xfd, 0x20, 0xaa, 0x34, 0x6b, 0xee, 0x3b, 0x09, 0xe8, 0x00, 0x4b, 0xfc, - 0x68, 0x24, 0x43, 0xdb, 0x09, 0x58, 0xd0, 0xb6, 0xbf, 0xaf, 0x1d, 0x7f, - 0x8a, 0x4c, 0x9e, 0x51, 0x97, 0x97, 
0xe1, 0x0c, 0x0d, 0xaf, 0xd1, 0x1e, - 0x62, 0xad, 0x70, 0xa5, 0x8a, 0x24, 0x2f, 0x4a, 0xa6, 0x55, 0xb1, 0x44, - 0x09, 0x88, 0xab, 0xa5, 0x45, 0x28, 0xa0, 0x34, 0x9e, 0x14, 0x2c, 0xf9, - 0x0f, 0xb8, 0x33, 0x8f, 0xcc, 0xba, 0x50, 0x34, 0x4c, 0x96, 0x89, 0x09, - 0xb9, 0xa8, 0xfb, 0xac, 0x59, 0x73, 0xea, 0x61, 0xbc, 0x0d, 0x24, 0x3a, - 0x20, 0xc2, 0x76, 0xfc, 0x2e, 0xce, 0xfb, 0x75, 0x00, 0xca, 0x58, 0xbd, - 0xab, 0x61, 0x9b, 0x13, 0x2b, 0xa3, 0xf6, 0x15, 0x55, 0x83, 0x23, 0xc4, - 0xf3, 0x4c, 0x89, 0xc5, 0x4a, 0x18, 0x5c, 0x8d, 0x41, 0xcc, 0x06, 0x7b, - 0xe3, 0x2a, 0x1f, 0x6a, 0x57, 0xbc, 0x54, 0x61, 0x0c, 0xf2, 0xec, 0xbf, - 0xb0, 0xf0, 0x21, 0xde, 0xfc, 0xe4, 0xef, 0xce, 0x47, 0xc8, 0xdc, 0x11, - 0xc7, 0x8a, 0x12, 0x97, 0x68, 0x1d, 0x9e, 0x9a, 0xbf, 0xad, 0x62, 0x7e, - 0x4b, 0x88, 0xd7, 0x20, 0x22, 0xce, 0x5e, 0xe3, 0x87, 0x12, 0xa3, 0x05, - 0xef, 0x1f, 0x05, 0xb1, 0xbd, 0x1b, 0x80, 0x43, 0x84, 0x33, 0x8b, 0x87, - 0xa5, 0xc2, 0xe1, 0x49, 0xa8, 0x75, 0x49, 0x9b, 0x1b, 0x64, 0x8a, 0xd0, - 0x86, 0x10, 0xa8, 0x72, 0xeb, 0x2e, 0xe7, 0x3f, 0xaa, 0x6b, 0x4a, 0x22, - 0xae, 0x17, 0x8f, 0x10, 0x22, 0x03, 0x66, 0x67, 0x35, 0x40, 0x29, 0x1e, - 0xf2, 0x05, 0x36, 0xd5, 0xed, 0xe2, 0x2a, 0xcc, 0x77, 0xe2, 0x16, 0xef, - 0xa7, 0x9b, 0xe1, 0x1b, 0xba, 0xf3, 0xf5, 0x74, 0x6c, 0x2a, 0x98, 0x8a, - 0x14, 0xaf, 0x2c, 0xab, 0xfb, 0x51, 0x53, 0x75, 0x17, 0xcb, 0x5c, 0x86, - 0xb5, 0x60, 0x70, 0x29, 0x65, 0x69, 0x49, 0x42, 0x4f, 0x42, 0x6b, 0xc7, - 0xdb, 0x98, 0x7d, 0x1e, 0xf8, 0x45, 0xb2, 0x33, 0xd6, 0x34, 0x26, 0xa6, - 0x7f, 0x76, 0x31, 0x13, 0x13, 0x9d, 0xd2, 0xb0, 0x30, 0x0b, 0x0b, 0x3e, - 0x1a, 0x84, 0xb0, 0xbd, 0x81, 0x34, 0x25, 0x73, 0x99, 0x87, 0x1a, 0xc8, - 0x44, 0x34, 0x9d, 0x1a, 0x3d, 0x76, 0x44, 0x1d, 0xe2, 0x22, 0xad, 0x3d, - 0xb2, 0xa3, 0x1c, 0xd5, 0x27, 0x8c, 0xc6, 0x84, 0xdf, 0x33, 0xbe, 0xb2, - 0xa7, 0xb9, 0xc5, 0x6e, 0x48, 0xdc, 0xe9, 0xf8, 0xef, 0xfc, 0xaa, 0x1f, - 0x5e, 0x41, 0x48, 0x1e, 0xe0, 0xb9, 0xd6, 0x6e, 0x7a, 0x9c, 0xa3, 0x98, - 0x4b, 0xfa, 0x90, 0xa4, 0x58, 0x33, 0x85, 0x3b, 0x11, 0x44, 0x83, 0x4b, - 0x1e, 0x0e, 0x5d, 0x11, 0x36, 0x15, 0xe1, 0xbf, 0x15, 0x04, 0x8e, 0x88, - 0xc6, 0x18, 0x53, 0xc3, 0x8d, 0x28, 0x86, 0x25, 0xef, 0x55, 0x7b, 0xf6, - 0x85, 0xf8, 0xed, 0x3b, 0xcf, 0x5d, 0xa6, 0xc7, 0x66, 0xb7, 0xbe, 0x14, - 0xf0, 0x62, 0x89, 0x1f, 0x32, 0x1e, 0x86, 0x2a, 0x93, 0xd5, 0xca, 0x37, - 0x03, 0x0b, 0xf8, 0x0f, 0xca, 0x50, 0x6c, 0x16, 0x2b, 0xf0, 0x77, 0xca, - 0xbb, 0x8e, 0x95, 0x11, 0xef, 0x5b, 0xbe, 0x2f, 0x62, 0x50, 0xb8, 0x3d, - 0xff, 0xfa, 0x30, 0x21, 0xb2, 0x86, 0x3f, 0x50, 0x57, 0x98, 0x79, 0x15, - 0xce, 0x3e, 0xbf, 0x49, 0x58, 0xb0, 0xb5, 0xd7, 0xbe, 0x01, 0x55, 0xee, - 0x60, 0x14, 0x9d, 0x5b, 0x57, 0x48, 0x05, 0x72, 0x6a, 0x23, 0x29, 0xeb, - 0xf3, 0x36, 0x2a, 0xc1, 0xda, 0x5e, 0x4a, 0x63, 0xc4, 0x6b, 0x04, 0xe8, - 0xe8, 0xc1, 0xb5, 0xc4, 0x2d, 0x60, 0x1f, 0xa0, 0x2b, 0x33, 0xa5, 0xb7, - 0x82, 0x59, 0x21, 0xba, 0x13, 0xda, 0x79, 0xda, 0x5a, 0xb1, 0x82, 0x5b, - 0x52, 0x7f, 0x0c, 0x70, 0x75, 0x65, 0xe0, 0x44, 0xb3, 0xca, 0xd0, 0x09, - 0x38, 0x24, 0x83, 0x8e, 0x0c, 0x4c, 0xef, 0x96, 0xe4, 0x04, 0x30, 0x46, - 0x23, 0x6a, 0x28, 0x13, 0x1d, 0x37, 0x14, 0x75, 0x6e, 0xd0, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x21, 0xa2, 0xf0, 0x7d, - 0x29, 0x8f, 0x62, 0x2e, 0xf4, 0x0e, 0x14, 0x9b, 0x60, 0x38, 0xc0, 0x95, - 0xfb, 0x3c, 0x90, 0x5a, 0xa0, 0x1f, 0x30, 0x09, 0xfc, 0x6d, 0xa9, 0xd1, - 0x7b, 0x0b, 0x7c, 0x78, 0xf9, 0xf6, 0xa8, 0x5e, 0xa6, 0x7a, 0xf6, 0x1c, - 0xab, 0x1b, 0x0e, 0xa9, 0x08, 0xfd, 0xd9, 0x97, 0x08, 0x24, 0x2b, 0xda, - 0x08, 0x8b, 0x0c, 0x07, 0x70, 0x15, 
0xa8, 0x0c, 0x86, 0xfc, 0xd1, 0x84, - 0xba, 0xd0, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0x35, 0x7a, 0xab, 0xaa, 0xbe, 0xd7, 0xad, 0x22, 0x99, 0x46, 0xbb, 0x78, - 0xfd, 0x47, 0x8f, 0x2a, 0x4a, 0xa6, 0x2f, 0x8d, 0x15, 0x07, 0xed, 0x26, - 0x1d, 0xb3, 0x12, 0xd3, 0x88, 0x0f, 0xf1, 0x75, 0x2a, 0x07, 0x62, 0xac, - 0xbf, 0x52, 0x4a, 0xc3, 0x12, 0xe5, 0x3c, 0xea, 0xa6, 0x1e, 0x57, 0x90, - 0x56, 0x60, 0x7d, 0xcf, 0x4b, 0x65, 0xaf, 0xee, 0x17, 0x56, 0xbe, 0xd2, - 0x38, 0x3f, 0xd6, 0xbc, 0xef, 0xa7, 0x32, 0xb7, 0x10, 0xe9, 0xbd, 0x97, - 0x45, 0x92, 0x3c, 0xd3, 0x35, 0x2e, 0x59, 0x37, 0x65, 0x5c, 0x7f, 0xd0, - 0x99, 0x9c, 0x01, 0xe9, 0x1f, 0x65, 0xe9, 0xec, 0x0f, 0x2d, 0x46, 0xbc, - 0xd4, 0x8f, 0x51, 0x1c, 0xa0, 0xa4, 0x9b, 0x4f, 0x95, 0x54, 0xb0, 0x50, - 0x74, 0xfa, 0x0f, 0xe6, 0x55, 0x81, 0xce, 0x0f, 0xd1, 0x25, 0x56, 0xc8, - 0x2f, 0x3a, 0x65, 0xd4, 0x86, 0x4a, 0x8e, 0xff, 0x5a, 0xcc, 0x67, 0x96, - 0xcc, 0x65, 0x0d, 0x20, 0xee, 0xba, 0x6b, 0xcb, 0xde, 0x10, 0x2f, 0xbf, - 0x67, 0x6d, 0xbe, 0xef, 0x72, 0xfc, 0x25, 0x62, 0xbf, 0xbb, 0xc5, 0xe0, - 0x7b, 0x4c, 0x32, 0xc5, 0xdb, 0x9f, 0xb5, 0xe2, 0x75, 0x8a, 0xba, 0xbb, - 0x69, 0x28, 0xb6, 0x41, 0x25, 0x83, 0x67, 0x35, 0x1b, 0xd7, 0xb3, 0xd7, - 0x58, 0x54, 0x8a, 0x0b, 0x7c, 0xf3, 0x05, 0xcf, 0x2c, 0x78, 0x70, 0xc6, - 0xed, 0x7e, 0x56, 0xb6, 0x4e, 0x48, 0xaa, 0x57, 0xc4, 0xb0, 0xb2, 0xa0, - 0xca, 0x50, 0xe1, 0xc7, 0x41, 0xea, 0xac, 0x5f, 0x18, 0x13, 0xe5, 0x85, - 0x78, 0x3f, 0x05, 0xf3, 0xfd, 0x74, 0x7a, 0x42, 0x61, 0x91, 0x19, 0xc6, - 0x19, 0xe9, 0xd2, 0x78, 0x2c, 0xb1, 0xa3, 0x7f, 0x62, 0xea, 0x2a, 0x35, - 0x1c, 0x55, 0xa3, 0xf7, 0xdc, 0xec, 0x48, 0x23, 0x99, 0x8d, 0xe1, 0x4d, - 0x45, 0xad, 0x92, 0xc6, 0xf4, 0xa2, 0xe5, 0xe6, 0x58, 0xe4, 0xd5, 0x37, - 0xd0, 0x47, 0x0b, 0x64, 0x68, 0x48, 0x7e, 0xeb, 0xbe, 0x5e, 0x74, 0xd1, - 0xc4, 0xa5, 0x60, 0xd0, 0x30, 0x62, 0xbc, 0x81, 0xc4, 0x01, 0x68, 0x18, - 0xf3, 0xac, 0x9d, 0xb1, 0x4d, 0xdd, 0x8b, 0xd2, 0x54, 0x5d, 0xd1, 0x1c, - 0xee, 0x75, 0x9e, 0x99, 0x42, 0x69, 0x38, 0xcc, 0x66, 0x24, 0xd9, 0x8f, - 0x70, 0x98, 0xc3, 0x5e, 0x08, 0xf0, 0xd8, 0x2d, 0xe6, 0x52, 0x48, 0xdf, - 0xd0, 0x03, 0x04, 0x92, 0xab, 0xa1, 0xa1, 0x2f, 0x7d, 0x84, 0xb2, 0x82, - 0x51, 0x56, 0x74, 0x4a, 0x94, 0xff, 0xd2, 0xe4, 0x4e, 0x1a, 0xbd, 0x18, - 0xab, 0x33, 0x68, 0x0e, 0x4f, 0x99, 0x1d, 0x7e, 0x02, 0x3f, 0x1f, 0x50, - 0x05, 0xf8, 0x59, 0x47, 0x97, 0x98, 0x60, 0xb1, 0x30, 0xb1, 0x14, 0xac, - 0x2c, 0x0a, 0xa8, 0x97, 0x83, 0xf5, 0x5a, 0x5c, 0x87, 0xe5, 0x36, 0x26, - 0xec, 0xb4, 0x94, 0x46, 0x9a, 0xad, 0x2b, 0x9a, 0xb7, 0xac, 0xc4, 0x1a, - 0x55, 0x53, 0xc0, 0x16, 0x91, 0x1c, 0xd6, 0xaa, 0x6b, 0xdd, 0x85, 0x6a, - 0x54, 0xec, 0x7c, 0xa1, 0xd5, 0x18, 0x00, 0x74, 0xd2, 0xf1, 0x7e, 0xad, - 0x7c, 0xa8, 0x85, 0x9b, 0xc0, 0x9f, 0x4f, 0x3b, 0xd9, 0x08, 0xc8, 0x9d, - 0x31, 0x22, 0x7a, 0x53, 0xa8, 0xbd, 0x00, 0xdf, 0xe8, 0x39, 0x52, 0xe9, - 0x14, 0x74, 0x7b, 0x53, 0xf9, 0xbd, 0x29, 0x8e, 0x5d, 0xf2, 0x35, 0x3b, - 0xe3, 0x48, 0xbf, 0xa0, 0xc4, 0x3d, 0x40, 0xb4, 0xf2, 0x7c, 0xd0, 0xe3, - 0x17, 0x11, 0x5b, 0xd6, 0x55, 0xd2, 0x54, 0xcf, 0x20, 0x8d, 0x74, 0x4a, - 0x6b, 0xe9, 0x5d, 0xfe, 0x72, 0x14, 0x6a, 0x11, 0x8b, 0x14, 0x19, 0xba, - 0x63, 0xe4, 0x6b, 0x39, 0xb4, 0x90, 0x67, 0x79, 0x56, 0x31, 0xd3, 0xb5, - 0xeb, 0x9e, 0x95, 0x4b, 0x1e, 0x04, 0x20, 0xd8, 0xbe, 0xe8, 0x1c, 0xd7, - 0x95, 0xcb, 0x57, 0x60, 0xe6, 0x11, 0x35, 0x42, 0x90, 0xfd, 0xb2, 0xe4, - 0x9b, 0x24, 0x70, 0xc0, 0xc3, 0xa9, 0x8a, 0xc9, 0x46, 0xd0, 0xea, 0xc9, - 0x93, 0x7d, 0x9f, 0x64, 0x12, 0x54, 0x09, 0xb7, 0xc2, 0x4d, 0x6e, 0xcc, - 0x60, 0x07, 0x36, 0x31, 0x64, 0x3d, 
0x1e, 0xd3, 0x86, 0x47, 0x47, 0x42, - 0x76, 0xb6, 0xf0, 0xe5, 0xb4, 0xe7, 0xbe, 0x47, 0x91, 0x78, 0xbe, 0x06, - 0xf1, 0x6e, 0x58, 0xce, 0x32, 0x13, 0x26, 0x34, 0x92, 0xae, 0xb2, 0x29, - 0xd0, 0x30, 0x55, 0xfd, 0x89, 0x6a, 0xbf, 0x3e, 0xdf, 0x11, 0x39, 0xe4, - 0xfd, 0x56, 0xd7, 0x2f, 0x89, 0x96, 0x08, 0x54, 0xaa, 0xab, 0x8b, 0xfa, - 0x65, 0xe5, 0x64, 0xff, 0x24, 0x25, 0x8f, 0x7d, 0xf6, 0xb1, 0x7f, 0x2f, - 0xa6, 0xf6, 0x46, 0xab, 0x61, 0xfd, 0x47, 0xad, 0x6d, 0x38, 0x6d, 0xc1, - 0xe9, 0x4a, 0xf1, 0x85, 0x05, 0x0e, 0x69, 0x48, 0x7c, 0xa6, 0x76, 0x61, - 0xe3, 0x94, 0xf2, 0xd6, 0x7a, 0x9c, 0x79, 0xc0, 0x2a, 0x51, 0x23, 0xc6, - 0xaf, 0x29, 0x04, 0x0f, 0x47, 0xc2, 0x93, 0xd7, 0x64, 0xe5, 0x37, 0x2e, - 0x53, 0x3b, 0xb7, 0x7c, 0x9c, 0xb4, 0x63, 0x13, 0xc7, 0x56, 0x90, 0xe9, - 0x53, 0xd5, 0x86, 0x2b, 0x96, 0x41, 0x42, 0x56, 0xc5, 0x16, 0xd7, 0x9e, - 0x30, 0xce, 0xa1, 0x0d, 0x93, 0x5d, 0x11, 0x07, 0xb2, 0x95, 0xfd, 0xf6, - 0x0b, 0x28, 0x95, 0x1a, 0x8f, 0xfa, 0xe1, 0x57, 0x7e, 0x06, 0xff, 0x18, - 0xaf, 0xe3, 0x4f, 0x3c, 0x34, 0x5b, 0xd4, 0x46, 0x1a, 0xd1, 0xd1, 0x7e, - 0x55, 0xba, 0x5d, 0x2a, 0x1f, 0x42, 0x49, 0x95, 0x75, 0x5f, 0x80, 0x60, - 0x02, 0x01, 0xdb, 0x36, 0xad, 0x68, 0x69, 0x1e, 0x0b, 0x90, 0x3f, 0xa6, - 0xb6, 0x2f, 0x66, 0xa6, 0x7d, 0x81, 0x8c, 0xa0, 0xee, 0x05, 0x95, 0xbc, - 0xb3, 0x7c, 0x18, 0xd4, 0x1b, 0x40, 0x96, 0xf5, 0x05, 0x9d, 0x27, 0x3b, - 0x78, 0xfc, 0x19, 0x18, 0xc0, 0x61, 0xa0, 0xd6, 0xf9, 0xc0, 0x3f, 0xe5, - 0x48, 0x35, 0x0f, 0x8b, 0x0d, 0xfb, 0x31, 0xb7, 0x32, 0x40, 0x1d, 0x69, - 0x12, 0x5a, 0x23, 0xf0, 0xce, 0xe9, 0x5e, 0xa6, 0x68, 0x6b, 0xe1, 0xe2, - 0x68, 0x07, 0x02, 0x0d, 0x7a, 0xc2, 0x0a, 0x40, 0x10, 0x5e, 0x94, 0xba, - 0x77, 0x1d, 0xf7, 0xac, 0xec, 0x79, 0xa9, 0xa1, 0x8a, 0xb8, 0x49, 0x32, - 0x08, 0xe0, 0x18, 0xa8, 0x3d, 0x69, 0x41, 0x5d, 0x30, 0x3b, 0xb6, 0x91, - 0x46, 0x8d, 0x81, 0x10, 0xb0, 0xc2, 0xed, 0xa0, 0x4e, 0x59, 0x48, 0xd8, - 0x64, 0x7d, 0x2d, 0x46, 0xf2, 0x8a, 0x2e, 0x5d, 0x0c, 0x4d, 0x9f, 0xfe, - 0x7b, 0x5e, 0xbf, 0x1a, 0x78, 0xdf, 0xfc, 0x0f, 0x04, 0x37, 0x72, 0x1a, - 0x09, 0xb8, 0x6e, 0x1b, 0xf1, 0x18, 0x7d, 0x83, 0x44, 0xaa, 0x9b, 0x71, - 0xe1, 0x03, 0x04, 0x83, 0xe5, 0xaa, 0xc0, 0xd4, 0xa7, 0x80, 0x10, 0x35, - 0x09, 0xae, 0xf7, 0xe1, 0x5e, 0x7c, 0x31, 0x20, 0x43, 0x82, 0xda, 0x07, - 0x39, 0xfe, 0x8f, 0x9d, 0x70, 0x3c, 0x57, 0x43, 0x01, 0x51, 0x37, 0x2e, - 0x97, 0xef, 0xcf, 0x05, 0x44, 0x75, 0x69, 0xf7, 0xdb, 0xda, 0x80, 0x78, - 0x0c, 0xcc, 0xc1, 0x49, 0xac, 0x3b, 0x7e, 0x27, 0x6a, 0xbb, 0xdf, 0x45, - 0x5b, 0x3b, 0x29, 0xf6, 0x1b, 0xa9, 0x25, 0xf9, 0x2f, 0xcf, 0x37, 0x71, - 0x33, 0xb4, 0x90, 0xd7, 0x9b, 0x87, 0x41, 0x15, 0xd1, 0xa6, 0x39, 0xa7, - 0xa9, 0xcd, 0x66, 0x29, 0x59, 0xb4, 0x53, 0x12, 0xa1, 0x20, 0xd5, 0x04, - 0xca, 0x40, 0x31, 0xfa, 0x6f, 0xbb, 0x92, 0x04, 0xf3, 0xc2, 0x10, 0x0d, - 0xc1, 0x19, 0x78, 0x8c, 0x82, 0xed, 0x92, 0x3a, 0x6b, 0xd1, 0x3d, 0xe8, - 0xac, 0x55, 0xe4, 0x8c, 0xc6, 0xd4, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x00, 0x00, 0xc2, 0x1d, 0x86, 0xe4, 0xf6, 0xa1, 0xbe, 0xf5, - 0xf3, 0x36, 0x9d, 0x32, 0x80, 0x17, 0x3b, 0x1f, 0x18, 0x21, 0xed, 0xa7, - 0xf5, 0xaf, 0xf1, 0x94, 0xe2, 0xa7, 0x08, 0xd5, 0xca, 0x18, 0x45, 0xf5, - 0x68, 0x94, 0x82, 0x61, 0xf7, 0xb7, 0xb2, 0xfa, 0xd4, 0x5e, 0x32, 0xd0, - 0xf0, 0x20, 0x66, 0x83, 0xd1, 0x6b, 0x3c, 0xdf, 0x73, 0xeb, 0x73, 0x82, - 0x09, 0x9b, 0xd0, 0xc5, 0xb0, 0x9f, 0x01, 0x77, 0x85, 0xcc, 0x6e, 0x23, - 0xb7, 0x00, 0x45, 0xe0, 0xa6, 0x01, 0x29, 0x1d, 0x8b, 0xc4, 0xe0, 0xc2, - 0xe0, 0x4f, 0x3b, 0x07, 0xd5, 0xac, 0x6b, 0x88, 0xb8, 0xa4, 0xe2, 0x5c, - 0x19, 0xe9, 0x98, 0x72, 0xa5, 0x6b, 
0xf5, 0xa4, 0xf7, 0x15, 0xaf, 0xfb, - 0xb4, 0x80, 0x9a, 0xe3, 0xa5, 0x35, 0x2f, 0x45, 0x81, 0xf1, 0x8b, 0x2d, - 0x26, 0x5c, 0x65, 0xa9, 0x5b, 0x6e, 0x83, 0xc3, 0x62, 0x2f, 0x84, 0xef, - 0x11, 0xa5, 0x58, 0x48, 0xe9, 0x67, 0x7e, 0xd3, 0x0b, 0x5d, 0x51, 0x80, - 0x39, 0x08, 0x8e, 0xc1, 0x0d, 0x04, 0x11, 0x5f, 0x72, 0x64, 0x1f, 0x83, - 0xf8, 0xd3, 0x09, 0x38, 0xb6, 0x7f, 0x50, 0x78, 0x27, 0x20, 0xe5, 0xbd, - 0x16, 0xbf, 0x51, 0xd8, 0x4f, 0x67, 0x60, 0xf6, 0x9e, 0xff, 0x08, 0xfe, - 0xc6, 0x96, 0xd6, 0x64, 0x94, 0x28, 0xc6, 0x9a, 0x09, 0x1a, 0x34, 0x08, - 0x31, 0x4b, 0x0b, 0x97, 0x5a, 0x18, 0x72, 0x49, 0xe9, 0x1d, 0xbb, 0x9c, - 0xed, 0x7e, 0xb5, 0xc5, 0xa7, 0xf4, 0x25, 0x7a, 0x26, 0xe9, 0x15, 0x61, - 0x85, 0x32, 0xc9, 0xb3, 0xcf, 0x95, 0xbf, 0x35, 0x10, 0x2d, 0x71, 0xfe, - 0x03, 0xd6, 0x69, 0x75, 0x8d, 0xb7, 0x16, 0xa7, 0x3d, 0x0e, 0xb7, 0x55, - 0x6d, 0xa7, 0x9f, 0x10, 0x7e, 0x7e, 0xff, 0x39, 0xee, 0x8e, 0xa7, 0x81, - 0x7d, 0x11, 0xea, 0xa9, 0xd6, 0xed, 0x54, 0xf8, 0xd2, 0xd5, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0xf9, 0xde, 0x41, 0xe7, - 0xa6, 0x88, 0x53, 0x76, 0x5a, 0x26, 0xc3, 0x5c, 0xf2, 0x58, 0x68, 0x9c, - 0xc7, 0x4e, 0x53, 0x18, 0x53, 0x67, 0x39, 0x23, 0x96, 0xb0, 0xef, 0x58, - 0x29, 0xe1, 0x68, 0xd8, 0xce, 0xc0, 0x41, 0xc2, 0x35, 0x5f, 0x74, 0xfa, - 0xdf, 0xc7, 0x0f, 0x80, 0x50, 0xd1, 0xf6, 0x5a, 0x3a, 0x81, 0xe0, 0xd9, - 0x9b, 0x47, 0x96, 0xcd, 0xc5, 0x0f, 0x91, 0x12, 0x81, 0x77, 0x1e, 0xef, - 0x2e, 0xba, 0x16, 0x51, 0x70, 0x78, 0xdc, 0xa3, 0x84, 0x12, 0x7c, 0x9e, - 0x21, 0x7d, 0xa3, 0x5f, 0xce, 0xa1, 0x25, 0x84, 0x99, 0xa4, 0x2d, 0xa6, - 0x0f, 0x95, 0xef, 0xef, 0x31, 0xe6, 0xf2, 0x18, 0x08, 0x47, 0xd2, 0x5a, - 0x39, 0x01, 0x7a, 0xca, 0xd3, 0x03, 0xb1, 0xc2, 0x48, 0xf4, 0x1f, 0x6d, - 0xc2, 0x8c, 0x5c, 0xda, 0xf5, 0x10, 0xed, 0xfc, 0x2e, 0x0c, 0xb3, 0x52, - 0xaa, 0xa9, 0xed, 0xbc, 0x41, 0xcc, 0xd4, 0x4b, 0x1c, 0xd0, 0xa3, 0x1d, - 0xf4, 0xe7, 0x48, 0x34, 0x4e, 0xcf, 0x3b, 0xb3, 0x71, 0x06, 0xbe, 0x0c, - 0x35, 0xbb, 0xb4, 0x17, 0xd8, 0x8b, 0xba, 0xdd, 0x32, 0x30, 0x51, 0xb1, - 0xb1, 0xd6, 0x3a, 0xdc, 0x3b, 0x25, 0x9a, 0x57, 0xc7, 0x4d, 0xd3, 0x75, - 0x93, 0x59, 0x3e, 0x9b, 0x10, 0xcf, 0xdb, 0x38, 0x75, 0x51, 0xb2, 0x2a, - 0x48, 0x78, 0xfc, 0xaa, 0xe3, 0x91, 0xe7, 0x93, 0xe7, 0x0a, 0x07, 0x2c, - 0xf8, 0x88, 0x93, 0xde, 0x2f, 0xba, 0x7b, 0x72, 0xcd, 0x92, 0xdd, 0xb1, - 0xac, 0x1e, 0xe4, 0xe3, 0x5d, 0xa4, 0x7f, 0x86, 0xa7, 0xcb, 0xb5, 0x81, - 0x86, 0xf1, 0xf5, 0xad, 0xd6, 0x36, 0x08, 0x09, 0x9f, 0x75, 0x6f, 0x4a, - 0x5b, 0x30, 0xf8, 0xaf, 0xd2, 0xbc, 0xb5, 0xbe, 0xf2, 0xeb, 0x9b, 0xbc, - 0x11, 0xd4, 0x0c, 0x14, 0xa6, 0x6f, 0x43, 0xd3, 0xc9, 0x4e, 0xca, 0x9b, - 0x4e, 0x46, 0x60, 0x4c, 0x63, 0xcc, 0x07, 0x36, 0x8c, 0xf2, 0xd1, 0x93, - 0x7a, 0x51, 0x49, 0x15, 0xbf, 0xbf, 0x9e, 0x82, 0x21, 0x06, 0xa0, 0x39, - 0x11, 0x1d, 0x6c, 0x41, 0x72, 0xcd, 0x2a, 0x8a, 0x4a, 0xd0, 0x13, 0x6c, - 0x56, 0xf4, 0x00, 0x48, 0xaf, 0xab, 0xdf, 0xa9, 0xe9, 0xa6, 0xaa, 0x06, - 0x61, 0x79, 0xc4, 0x57, 0x42, 0xca, 0x12, 0x18, 0xcf, 0x81, 0xec, 0x79, - 0x19, 0xd2, 0xd2, 0xe3, 0x1d, 0xc6, 0x6c, 0xd0, 0xd6, 0x0a, 0xfb, 0x70, - 0x42, 0x28, 0x25, 0x23, 0xb6, 0x23, 0x15, 0x28, 0x5e, 0x9f, 0x49, 0xf2, - 0x7b, 0x69, 0x74, 0xa5, 0xb9, 0x26, 0x81, 0xfe, 0x39, 0x3e, 0x3f, 0xc8, - 0x7e, 0x9e, 0x5e, 0x8e, 0xf2, 0xdb, 0x6b, 0xfd, 0xe1, 0xc3, 0x01, 0x4a, - 0xba, 0x8f, 0x33, 0x71, 0x09, 0x80, 0x5d, 0x9c, 0x58, 0x64, 0xb7, 0x90, - 0x13, 0x2a, 0xe9, 0x1d, 0x07, 0x2c, 0x06, 0x70, 0x43, 0x0d, 0xb6, 0x57, - 0x02, 0x3c, 0xbe, 0x3c, 0x42, 0xab, 0x77, 0x15, 0x0e, 0x98, 0xfb, 0xf2, - 0x1d, 0x14, 0xd9, 0xb8, 0xd1, 0x59, 
0x2a, 0x67, 0x6f, 0xfc, 0x59, 0x39, - 0x33, 0xe0, 0x49, 0x0b, 0x4e, 0x65, 0x81, 0x9f, 0x71, 0xf2, 0xa5, 0x90, - 0x4f, 0x24, 0xc7, 0x05, 0xfb, 0x77, 0x1e, 0x14, 0xca, 0x2f, 0xfc, 0xac, - 0xec, 0xbf, 0xa2, 0x69, 0x15, 0x0a, 0x6b, 0xa9, 0xa0, 0x74, 0xee, 0xad, - 0xa9, 0x50, 0x4d, 0x4d, 0xab, 0x6e, 0xc1, 0xb3, 0xda, 0xbb, 0xbd, 0xab, - 0x00, 0x05, 0x14, 0xc1, 0xc4, 0x53, 0x7b, 0x78, 0x97, 0x68, 0x3c, 0x05, - 0xf2, 0xed, 0x87, 0xca, 0x86, 0xd1, 0xdf, 0xda, 0xb3, 0x2f, 0x17, 0x87, - 0x87, 0x2f, 0xd8, 0xe9, 0xb2, 0x96, 0xdc, 0x7f, 0x22, 0xf1, 0x2a, 0x9f, - 0xfe, 0x54, 0x55, 0xa1, 0x96, 0xab, 0x9f, 0x61, 0x74, 0xcd, 0x4d, 0x77, - 0x38, 0x02, 0x23, 0x29, 0x28, 0x5b, 0xfc, 0x86, 0x17, 0x40, 0xd4, 0x42, - 0x2a, 0x9b, 0x84, 0xf7, 0x67, 0x2b, 0x3a, 0xc1, 0x31, 0x89, 0x4b, 0x67, - 0xd1, 0x7d, 0x6b, 0x36, 0xec, 0x69, 0x6b, 0x24, 0xca, 0xd6, 0x2d, 0xbb, - 0x21, 0xc8, 0x0c, 0x53, 0x41, 0x29, 0x0b, 0xc1, 0xfe, 0xd5, 0xa3, 0x4c, - 0x66, 0x2f, 0xc7, 0xf1, 0xa8, 0xc0, 0x3d, 0x9a, 0xb9, 0x09, 0x50, 0x3f, - 0x09, 0x87, 0xa4, 0x3f, 0x7a, 0x33, 0xef, 0xf0, 0xfb, 0x77, 0x02, 0x7d, - 0x92, 0xaf, 0x73, 0xaa, 0xcc, 0x3f, 0x66, 0x56, 0xd0, 0x21, 0xd1, 0xe8, - 0x0e, 0x47, 0x03, 0x5e, 0x3b, 0xe9, 0xa2, 0xe3, 0x83, 0x0b, 0x73, 0xd3, - 0xaa, 0x94, 0x80, 0xef, 0x7c, 0xdf, 0xde, 0x86, 0xc3, 0xa9, 0x62, 0x34, - 0x76, 0xee, 0x4d, 0x15, 0x73, 0x7b, 0xd7, 0x6d, 0xd4, 0x21, 0x05, 0xd4, - 0xcf, 0xf3, 0x54, 0xdc, 0x49, 0x5f, 0x5a, 0x2a, 0x37, 0x19, 0x89, 0x61, - 0x1d, 0x95, 0x17, 0x8b, 0x09, 0x95, 0x5d, 0x9f, 0xde, 0x86, 0x03, 0x93, - 0x76, 0xec, 0x54, 0xec, 0x13, 0xc3, 0xf9, 0x38, 0x8f, 0xa9, 0x11, 0xf0, - 0x9a, 0x0e, 0x5e, 0x38, 0x69, 0xeb, 0x62, 0x41, 0x9e, 0xd0, 0x1b, 0x59, - 0x8c, 0xfd, 0x16, 0xfa, 0xd8, 0x99, 0x0d, 0x83, 0x7e, 0xba, 0x5b, 0xc6, - 0x59, 0xe1, 0xae, 0xba, 0xb9, 0xb8, 0xba, 0xa5, 0x4d, 0x20, 0x00, 0xc9, - 0x0c, 0xe1, 0x77, 0xdf, 0xc4, 0x95, 0xca, 0x7c, 0xa5, 0xef, 0x0a, 0xed, - 0x9b, 0x31, 0x06, 0xe1, 0xc9, 0xa3, 0x88, 0x0a, 0xcc, 0x3d, 0xc8, 0xb6, - 0x01, 0xe2, 0xa9, 0x29, 0x03, 0x8a, 0x28, 0xf8, 0x0d, 0x70, 0x77, 0xb9, - 0xe1, 0x1b, 0x06, 0x19, 0x86, 0xc1, 0xd3, 0xcf, 0x6b, 0x9c, 0x09, 0x70, - 0x50, 0xed, 0xb5, 0xf6, 0x69, 0xcc, 0xac, 0x30, 0x6a, 0x1f, 0x1d, 0xe6, - 0x75, 0x33, 0xab, 0x55, 0x48, 0xfa, 0x81, 0xb8, 0x06, 0x3a, 0x78, 0xee, - 0xde, 0xef, 0xe2, 0x17, 0xc4, 0x3e, 0xe5, 0x22, 0xa7, 0xd1, 0x45, 0x5b, - 0x57, 0xb0, 0xde, 0x69, 0x30, 0xd1, 0x9a, 0xd7, 0x6b, 0x0e, 0x7a, 0x30, - 0x0d, 0xb5, 0xec, 0x60, 0xa7, 0x05, 0x87, 0x42, 0x4b, 0x92, 0x1f, 0x68, - 0x8e, 0x1a, 0x90, 0x84, 0x27, 0x2a, 0xc0, 0xd2, 0xff, 0xbc, 0x8e, 0x34, - 0x53, 0x9d, 0x04, 0x50, 0xcb, 0x79, 0xd9, 0x55, 0xd5, 0x4d, 0x3c, 0xe2, - 0xb4, 0x9b, 0x57, 0x07, 0x1f, 0xce, 0xd0, 0xa7, 0x84, 0xe1, 0xb7, 0x3a, - 0xaf, 0xc5, 0x67, 0x64, 0xbc, 0x02, 0xbe, 0xb0, 0x65, 0x7e, 0xb0, 0x4c, - 0xc2, 0x2d, 0xcd, 0xf8, 0x60, 0xcb, 0xfe, 0xd1, 0x8d, 0x14, 0x5a, 0xd3, - 0x38, 0xd4, 0x71, 0x5a, 0xca, 0xbb, 0xfe, 0x0e, 0x54, 0xf9, 0xb4, 0x25, - 0xa5, 0x71, 0x13, 0x95, 0x14, 0xdc, 0x86, 0xb8, 0x21, 0xa7, 0x2e, 0x13, - 0xc6, 0x2f, 0xce, 0xe7, 0x6c, 0xb8, 0x0d, 0xc9, 0xe4, 0xc4, 0x64, 0x12, - 0x78, 0x1c, 0x95, 0x92, 0xc2, 0xec, 0xaa, 0xd3, 0xc3, 0x3a, 0xd2, 0xe8, - 0x95, 0xf0, 0x6b, 0x03, 0x8c, 0xcf, 0x6b, 0xdb, 0x21, 0xa0, 0xcf, 0xf4, - 0x05, 0xc8, 0xe7, 0x77, 0x05, 0x55, 0x7b, 0x6b, 0xfa, 0x96, 0xf1, 0x7c, - 0x30, 0x62, 0x75, 0xbe, 0x6e, 0xea, 0xba, 0x9f, 0x40, 0x2e, 0x9a, 0x86, - 0x93, 0xcc, 0x38, 0xf7, 0xee, 0xd8, 0xbb, 0x24, 0xcd, 0x85, 0x3e, 0x85, - 0x16, 0x8c, 0x33, 0x23, 0x73, 0xe6, 0x43, 0xc4, 0x67, 0xbf, 0xef, 0x85, - 0xb1, 0x44, 0xf9, 0x55, 0x93, 0x4d, 
0x0b, 0x8e, 0xc1, 0x42, 0x13, 0xc6, - 0xc8, 0x09, 0x63, 0xab, 0xb3, 0xc7, 0xc4, 0xa4, 0x8b, 0x72, 0xfb, 0xa5, - 0x99, 0xa1, 0x5d, 0x07, 0x02, 0x82, 0x56, 0x11, 0x3c, 0xc2, 0x5a, 0x55, - 0xf9, 0x3a, 0x93, 0x61, 0x89, 0x46, 0xb7, 0x6a, 0x42, 0x76, 0x1e, 0x70, - 0xde, 0xd9, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0x32, 0xc1, 0x61, 0xaa, 0xdb, 0xe9, 0xae, 0x88, 0xcb, 0xf7, 0x28, 0xdd, - 0x82, 0x62, 0x61, 0x41, 0x4e, 0xbb, 0xf9, 0xb7, 0xe8, 0x81, 0x99, 0x18, - 0xe2, 0xa7, 0xb4, 0x7c, 0xb7, 0x08, 0x44, 0x6f, 0x24, 0xb3, 0xda, 0x57, - 0x62, 0x29, 0xc7, 0xa6, 0x84, 0xb1, 0x5d, 0xc5, 0x00, 0x4c, 0x30, 0x16, - 0xf0, 0x0a, 0x74, 0x73, 0xec, 0xaf, 0xb5, 0xde, 0xb0, 0xa7, 0x75, 0x22, - 0x8f, 0x9e, 0x43, 0x01, 0x68, 0xae, 0x91, 0xeb, 0x46, 0x52, 0x3f, 0x2c, - 0x4e, 0xc5, 0xd0, 0xc8, 0x15, 0xea, 0x99, 0xc2, 0x37, 0x5b, 0x68, 0xb5, - 0xce, 0x41, 0x92, 0xbf, 0xd6, 0xdb, 0x85, 0xad, 0x08, 0xd1, 0x11, 0x93, - 0xe8, 0xd4, 0x78, 0x43, 0x3b, 0x7d, 0xcb, 0x42, 0x84, 0xf3, 0x61, 0x88, - 0x9e, 0x6a, 0x73, 0xb9, 0x78, 0x17, 0x9a, 0x9f, 0xfb, 0x97, 0xcb, 0xd6, - 0xb5, 0x3f, 0x00, 0x41, 0xb0, 0x30, 0x2f, 0x6f, 0x89, 0xdd, 0xfa, 0x13, - 0xd1, 0x07, 0xbe, 0x2f, 0xea, 0x91, 0x62, 0xaa, 0xed, 0xcb, 0xfd, 0x07, - 0x82, 0xbb, 0x3f, 0xf4, 0xa6, 0x94, 0x66, 0x71, 0x20, 0x61, 0xac, 0x84, - 0x04, 0x70, 0xf2, 0xd3, 0xdf, 0xac, 0x44, 0xfd, 0x47, 0x26, 0x81, 0x64, - 0xb3, 0xa6, 0x90, 0x2b, 0xd2, 0x2c, 0xd0, 0x77, 0x81, 0x53, 0x45, 0x78, - 0x5f, 0x30, 0x77, 0x91, 0x83, 0x13, 0x33, 0xd1, 0x91, 0xa6, 0x35, 0x21, - 0xcb, 0x26, 0x54, 0x0a, 0xf7, 0x70, 0x5e, 0xdb, 0xd8, 0x92, 0xc7, 0xdf, - 0xf9, 0x2a, 0x46, 0x91, 0x22, 0x3b, 0xe6, 0xe1, 0x91, 0xeb, 0xa6, 0x78, - 0x81, 0x57, 0xf3, 0x04, 0xdf, 0x34, 0x55, 0x74, 0x0a, 0xfe, 0xf2, 0xbd, - 0xb3, 0xeb, 0xa3, 0x8e, 0x71, 0x15, 0xa9, 0x2f, 0x53, 0xe2, 0xa1, 0x45, - 0xdf, 0xe8, 0x29, 0x40, 0xf1, 0x4b, 0x23, 0xdb, 0x8e, 0xee, 0x19, 0xa8, - 0xd4, 0x15, 0x90, 0x8c, 0x04, 0x46, 0x81, 0x49, 0x92, 0xe5, 0xe1, 0xfe, - 0x99, 0x06, 0xfc, 0x3e, 0x43, 0x58, 0x3b, 0x19, 0x7f, 0xd2, 0x13, 0x65, - 0xc2, 0x64, 0x27, 0x6d, 0x93, 0x6a, 0xcf, 0x48, 0x2a, 0x3d, 0xdd, 0x79, - 0x9f, 0x05, 0x32, 0xeb, 0xfd, 0xb4, 0xd2, 0x1d, 0x16, 0x61, 0x3d, 0x17, - 0x4c, 0xb8, 0xad, 0x63, 0x0e, 0x6b, 0x8a, 0x4a, 0x34, 0x4c, 0xb5, 0x3c, - 0x0f, 0x05, 0x28, 0x8c, 0x8b, 0xdf, 0xf4, 0xa0, 0x49, 0xbf, 0x34, 0x6c, - 0x6a, 0x5f, 0x40, 0x95, 0x48, 0x4b, 0x93, 0x1e, 0x61, 0x6d, 0x58, 0xc3, - 0x86, 0x98, 0x70, 0x11, 0x4e, 0x44, 0x65, 0xc1, 0x0d, 0xea, 0x2f, 0xda, - 0x38, 0x16, 0xbd, 0xd4, 0x7b, 0x3e, 0x31, 0xee, 0x42, 0x4c, 0xdc, 0xe9, - 0x8b, 0x1f, 0xa9, 0xcf, 0xab, 0x60, 0xb5, 0xb1, 0xd2, 0xf2, 0x6a, 0xe9, - 0xbc, 0xcc, 0xcb, 0x60, 0x4a, 0xca, 0x70, 0x79, 0x64, 0x9d, 0x07, 0x1e, - 0xdb, 0xef, 0x34, 0xaf, 0x17, 0x93, 0x6b, 0x60, 0x73, 0x2d, 0x8c, 0x08, - 0x27, 0x1e, 0x46, 0x9f, 0xcb, 0x33, 0xdd, 0x76, 0xef, 0x17, 0x58, 0x9a, - 0x5f, 0x82, 0x78, 0x0f, 0xbf, 0xe7, 0x0f, 0x3a, 0x1e, 0xa8, 0x30, 0xbf, - 0xff, 0xc7, 0xc7, 0x82, 0x8b, 0xc3, 0x65, 0x04, 0xfd, 0x45, 0xc9, 0x88, - 0x99, 0x8e, 0x44, 0xc5, 0x23, 0x1e, 0xbf, 0xf1, 0x95, 0x70, 0x35, 0xe6, - 0x56, 0x4a, 0x53, 0xb2, 0xac, 0x0c, 0xfd, 0xf5, 0x61, 0x26, 0x5b, 0x70, - 0xd6, 0x4c, 0xfc, 0x0f, 0xcc, 0x53, 0x6e, 0x25, 0xca, 0x1d, 0x0c, 0x56, - 0xf7, 0x9c, 0x95, 0xf6, 0x3c, 0x08, 0x0c, 0x64, 0xb1, 0x1c, 0x5c, 0xe6, - 0x25, 0xa4, 0xa3, 0xb7, 0xaf, 0x8b, 0xbc, 0xe1, 0x68, 0xdf, 0x10, 0xab, - 0xbb, 0xd5, 0x30, 0x64, 0x42, 0xf6, 0xe6, 0x9a, 0xb5, 0x59, 0x12, 0x76, - 0x92, 0xac, 0x29, 0xe9, 0x45, 0xdb, 0x2e, 0x62, 0x22, 0x58, 0x24, 0x89, - 0xc8, 0x6a, 0x2a, 0xa7, 0x3f, 0x04, 
0x53, 0x4e, 0x07, 0x41, 0x4e, 0x5f, - 0x95, 0x5f, 0x6e, 0x14, 0x5b, 0xa7, 0xa7, 0xd3, 0x5a, 0xa2, 0x95, 0x4a, - 0xc8, 0xe9, 0x3c, 0x5a, 0x84, 0x50, 0xbc, 0xe1, 0x9c, 0x7a, 0x16, 0xe5, - 0xc7, 0x04, 0x9d, 0x60, 0x2e, 0x7d, 0xb3, 0x77, 0x5d, 0x86, 0x2e, 0xac, - 0x57, 0x2a, 0x31, 0x26, 0x23, 0x6e, 0xcc, 0x7f, 0xb8, 0x36, 0x29, 0xa9, - 0xa8, 0xd9, 0xc6, 0x75, 0xee, 0x16, 0x23, 0x27, 0x0f, 0xe1, 0xb0, 0x3d, - 0x91, 0x3a, 0x26, 0x4a, 0x60, 0x72, 0x14, 0xf9, 0x3c, 0x66, 0x66, 0xe8, - 0x7d, 0x4a, 0x6f, 0x7e, 0x63, 0x58, 0x6a, 0x28, 0x78, 0x50, 0xef, 0x3b, - 0x9d, 0xeb, 0xb6, 0x4b, 0x5d, 0x55, 0x80, 0x84, 0x97, 0x9b, 0x74, 0x4b, - 0x5c, 0x09, 0x1d, 0xe7, 0x57, 0xfc, 0x40, 0x3f, 0xa9, 0xbd, 0xdf, 0x61, - 0x2a, 0x89, 0x62, 0x51, 0xfc, 0x24, 0xee, 0xee, 0x97, 0x10, 0xca, 0xb6, - 0x0e, 0x8e, 0x71, 0x67, 0x2a, 0x79, 0x4f, 0xc4, 0xe6, 0x3e, 0x27, 0xc2, - 0x9b, 0x85, 0xfd, 0xde, 0xfb, 0x58, 0x75, 0xf3, 0x1c, 0x31, 0xa2, 0x56, - 0x3e, 0xdc, 0x24, 0xf4, 0x4f, 0xcb, 0x5a, 0x1a, 0x77, 0x5c, 0x28, 0xd1, - 0x5a, 0x55, 0xa9, 0x8c, 0xb5, 0xdd, 0x77, 0x93, 0x58, 0xd8, 0x2f, 0x7d, - 0x5a, 0x67, 0xa1, 0x95, 0x0a, 0xd2, 0x6a, 0x93, 0xa6, 0xf0, 0x5f, 0x7f, - 0x0a, 0x29, 0xdb, 0x1d, 0x8c, 0xa7, 0x12, 0x0a, 0xf4, 0xc9, 0xcd, 0x70, - 0xd1, 0xbd, 0x48, 0xd4, 0x9a, 0xbb, 0xbb, 0x24, 0xbf, 0x52, 0x25, 0xb9, - 0x75, 0xc2, 0x17, 0x36, 0x6f, 0x4a, 0xc0, 0x53, 0x6d, 0x38, 0xfb, 0x7a, - 0x60, 0xc8, 0x5d, 0x03, 0xc1, 0x1c, 0x0c, 0x31, 0xf0, 0x59, 0xed, 0x0a, - 0x5f, 0x84, 0xf2, 0x89, 0x6c, 0xb4, 0xd5, 0x24, 0x2d, 0x2a, 0xda, 0xbe, - 0x74, 0x1d, 0x22, 0xe2, 0xc6, 0xf0, 0x9b, 0x98, 0x5a, 0x41, 0x11, 0x4c, - 0x51, 0x97, 0x16, 0xa7, 0xc9, 0xd8, 0x53, 0x12, 0x53, 0xdd, 0x22, 0xa9, - 0xf2, 0xae, 0x52, 0x49, 0x02, 0xf9, 0x5c, 0x78, 0x00, 0xa2, 0x64, 0xff, - 0x91, 0x62, 0x20, 0x6a, 0x87, 0x6a, 0x40, 0x01, 0x85, 0x30, 0xf5, 0xdd, - 0xa7, 0x64, 0x0a, 0x85, 0x8d, 0x37, 0x99, 0xcb, 0x03, 0xc8, 0x29, 0x56, - 0x7e, 0x75, 0x4f, 0xa1, 0xc3, 0x76, 0xce, 0xdb, 0xa3, 0xb4, 0x7e, 0x91, - 0x95, 0xbe, 0x53, 0x0e, 0x20, 0xc9, 0xe7, 0x71, 0x78, 0xad, 0x3d, 0x4c, - 0xbb, 0x59, 0xb9, 0x77, 0xcf, 0x7d, 0x7b, 0xff, 0x15, 0xdb, 0x1d, 0xae, - 0x1f, 0xbe, 0x33, 0x88, 0x01, 0x04, 0x95, 0xe5, 0xe9, 0x6a, 0x1c, 0xbf, - 0xc8, 0xc3, 0x33, 0x3b, 0xd8, 0x2f, 0x75, 0x4a, 0xc3, 0x6f, 0x09, 0x88, - 0x26, 0x46, 0x90, 0x89, 0x53, 0x12, 0x27, 0xc2, 0x7d, 0x23, 0x6b, 0xc4, - 0xe3, 0x0a, 0x0f, 0xc2, 0x86, 0x6d, 0x20, 0x35, 0x82, 0x33, 0xec, 0xdd, - 0xa7, 0x6a, 0xc3, 0xa8, 0x11, 0xdc, 0x02, 0xd9, 0x05, 0x1b, 0x04, 0x75, - 0x92, 0x6c, 0x08, 0x9e, 0x38, 0x72, 0xd9, 0x7d, 0x9b, 0xbc, 0xfd, 0xca, - 0xb8, 0x06, 0x0e, 0x24, 0x89, 0x90, 0xde, 0x52, 0xe4, 0xd1, 0xcc, 0x99, - 0x87, 0x0b, 0x87, 0xbb, 0x5c, 0xa9, 0xab, 0xec, 0xb5, 0xe4, 0xdd, 0x5d, - 0xfa, 0xb1, 0x97, 0x5f, 0x61, 0xf7, 0x58, 0xd6, 0x08, 0x02, 0xf2, 0x51, - 0x7c, 0x7a, 0xe6, 0xf1, 0xcb, 0x43, 0xd0, 0x21, 0x09, 0xb8, 0x82, 0xa9, - 0x52, 0xd9, 0xa8, 0x7f, 0x2b, 0xe1, 0x0f, 0x31, 0xbc, 0x16, 0xa2, 0xce, - 0x35, 0x55, 0x2e, 0xd6, 0xda, 0x38, 0xd9, 0xc2, 0x5e, 0xca, 0x27, 0xd9, - 0xa6, 0xd6, 0x4b, 0xa2, 0x73, 0xc4, 0xce, 0x66, 0x30, 0x60, 0xa2, 0x01, - 0xfa, 0xc1, 0xd6, 0xc8, 0xea, 0xdd, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x70, 0xe2, 0x62, 0x68, 0xff, 0x60, 0x67, 0x64, - 0x88, 0xdd, 0x81, 0x79, 0x82, 0xf5, 0x46, 0xf9, 0x7e, 0x0e, 0xa9, 0x26, - 0xf6, 0xcf, 0x5d, 0xef, 0x10, 0x11, 0xe1, 0x71, 0x72, 0x77, 0xcf, 0x02, - 0x7b, 0xf1, 0x6e, 0xc4, 0xb4, 0xfa, 0x2a, 0x12, 0xfe, 0x7e, 0x3c, 0x66, - 0xef, 0x41, 0x98, 0x3a, 0x1f, 0xa9, 0x14, 0x8f, 0x46, 0x22, 0xa0, 0xc2, - 0xee, 0x93, 0x25, 0x34, 0xf2, 0xb7, 
0x6d, 0x0a, 0x36, 0xde, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0xd4, 0x17, 0x62, 0x25, - 0xfd, 0x5b, 0x75, 0xeb, 0xec, 0x06, 0xc9, 0x39, 0x86, 0x6d, 0xc5, 0x60, - 0x2d, 0x33, 0x3d, 0xce, 0x6a, 0x9f, 0x07, 0x3b, 0xb9, 0x70, 0x0f, 0xc7, - 0x13, 0x46, 0x35, 0x46, 0x26, 0xe4, 0xbc, 0x6e, 0x54, 0x89, 0x29, 0xd5, - 0xa4, 0x94, 0xa0, 0x3a, 0x7a, 0x61, 0xcf, 0xd1, 0x48, 0x27, 0x7a, 0x72, - 0x95, 0xde, 0x93, 0xd1, 0x19, 0x1f, 0xc9, 0xc8, 0x8f, 0x0d, 0xce, 0x34, - 0x03, 0x39, 0x0a, 0x92, 0x16, 0x09, 0xc4, 0x49, 0xf9, 0x30, 0x2e, 0x19, - 0xd1, 0x69, 0x7e, 0x78, 0x00, 0x25, 0x30, 0x6f, 0x6b, 0xe1, 0xbe, 0xad, - 0xb2, 0x05, 0xde, 0xc7, 0xc2, 0xf7, 0xd5, 0xa7, 0x4d, 0x03, 0x6f, 0x6b, - 0xcd, 0xcb, 0x42, 0xfa, 0x88, 0x16, 0xd5, 0xa6, 0x60, 0x08, 0xd4, 0xa5, - 0x5b, 0x3b, 0x7b, 0xa2, 0xca, 0xa3, 0xa2, 0x5d, 0x63, 0x7f, 0xc0, 0x37, - 0xc5, 0x7e, 0x99, 0x04, 0x5d, 0x9a, 0xb9, 0xa5, 0xac, 0xd1, 0xe2, 0x5d, - 0xb2, 0x2b, 0x7e, 0xbb, 0xb9, 0x66, 0x13, 0xa7, 0x30, 0xbf, 0x80, 0x0c, - 0x2b, 0x8d, 0x45, 0xe1, 0x8d, 0x96, 0x25, 0x27, 0x47, 0x3d, 0x21, 0x7d, - 0x1c, 0x42, 0xac, 0x31, 0x26, 0x47, 0x59, 0xb3, 0x44, 0x85, 0xf2, 0x8e, - 0x7d, 0x01, 0x96, 0x6d, 0xb2, 0x64, 0xc3, 0xfc, 0xa7, 0x82, 0x06, 0x4a, - 0x87, 0x75, 0x9b, 0x99, 0x47, 0x7e, 0xa6, 0x4d, 0x2c, 0x36, 0xff, 0xac, - 0x2b, 0x77, 0x96, 0x52, 0x14, 0x8d, 0x07, 0x0d, 0x28, 0x9d, 0x84, 0xa2, - 0xda, 0xd6, 0x45, 0x3a, 0xd4, 0xe6, 0xb7, 0x9a, 0xf3, 0x34, 0xe3, 0xda, - 0x39, 0xdf, 0x35, 0x9c, 0xe4, 0x87, 0x55, 0xc8, 0x43, 0xd0, 0x61, 0x46, - 0x52, 0x2f, 0x75, 0x63, 0xbb, 0x98, 0x97, 0xeb, 0xfb, 0x15, 0xaf, 0x8e, - 0x96, 0xdc, 0xff, 0x0a, 0x90, 0xda, 0x09, 0x63, 0x28, 0x7b, 0x92, 0x73, - 0x0b, 0xd4, 0x2b, 0x72, 0x2a, 0x86, 0x32, 0xc3, 0xc1, 0x3e, 0xe4, 0x2c, - 0x07, 0x89, 0x53, 0xb7, 0xfe, 0x78, 0x6c, 0x95, 0xb4, 0x62, 0x4d, 0x4b, - 0xfe, 0x6c, 0xfc, 0x5e, 0x4e, 0xa7, 0x8c, 0x07, 0x4f, 0x85, 0x27, 0xe0, - 0x7b, 0xd9, 0x7a, 0xe5, 0x1d, 0xbc, 0x36, 0xda, 0x8e, 0x21, 0xff, 0xb3, - 0x60, 0x2c, 0x5e, 0x23, 0x0f, 0xde, 0x3f, 0xae, 0xa5, 0x3a, 0x50, 0xa9, - 0x99, 0x39, 0x45, 0xaf, 0xd3, 0x5f, 0x4a, 0x15, 0xad, 0x9c, 0x66, 0x7f, - 0x92, 0xe0, 0x02, 0x81, 0x3e, 0x06, 0x6a, 0x5e, 0xd0, 0x0c, 0x42, 0xe7, - 0xcf, 0xe2, 0xeb, 0xa3, 0xe0, 0xf7, 0x2d, 0x8a, 0x21, 0xdb, 0x64, 0x28, - 0x2a, 0xb3, 0x2b, 0xc4, 0xc9, 0xd5, 0x60, 0xaf, 0xfc, 0x15, 0xa1, 0x44, - 0x9c, 0x96, 0x04, 0x42, 0x1c, 0x55, 0x8c, 0xa5, 0xce, 0x80, 0xce, 0x75, - 0x64, 0xa9, 0xf6, 0xa5, 0x5a, 0x0f, 0x8a, 0x4b, 0x8b, 0x72, 0xcf, 0x3e, - 0xd7, 0xeb, 0xe1, 0xd0, 0xd3, 0x2d, 0x04, 0x6c, 0x9e, 0x02, 0x75, 0x43, - 0x5c, 0xc1, 0x57, 0x66, 0xd9, 0x14, 0x5b, 0x08, 0x10, 0x44, 0x8d, 0x8e, - 0x89, 0xd1, 0x65, 0x27, 0x2a, 0x0b, 0x99, 0x6f, 0x09, 0xa6, 0x20, 0xa5, - 0x75, 0x24, 0xe4, 0xf7, 0xf5, 0xe0, 0xed, 0x79, 0x37, 0x18, 0x13, 0x1c, - 0xd9, 0xd1, 0xf5, 0x69, 0x0c, 0xa5, 0x02, 0xdf, 0x6a, 0xfd, 0x2e, 0x35, - 0x8e, 0xd0, 0x41, 0x91, 0x61, 0x0f, 0x5c, 0xdd, 0x70, 0xbf, 0x1c, 0x49, - 0xcb, 0xe9, 0xc9, 0x33, 0xc4, 0x99, 0x1e, 0x8b, 0x75, 0x48, 0xc2, 0x58, - 0xa4, 0x70, 0x1f, 0xbb, 0xcd, 0xd3, 0x0e, 0x79, 0x25, 0xbe, 0x53, 0xfa, - 0x32, 0x32, 0xf6, 0xb9, 0xf0, 0x0a, 0x52, 0x5b, 0xe0, 0x69, 0xff, 0x43, - 0xda, 0x98, 0x1f, 0xee, 0x54, 0x60, 0xf8, 0x24, 0x43, 0xc5, 0x37, 0x72, - 0xd1, 0xfc, 0x99, 0x9a, 0x3e, 0x24, 0xe0, 0xd9, 0xc2, 0x61, 0x47, 0xb3, - 0x26, 0x09, 0x85, 0x74, 0xa1, 0x2b, 0x4a, 0x70, 0xd0, 0x1b, 0x90, 0x03, - 0x25, 0xd9, 0x22, 0xc2, 0x16, 0x22, 0x3a, 0x62, 0x20, 0xd4, 0x13, 0xce, - 0xa2, 0xc7, 0x02, 0xfb, 0x9a, 0xbf, 0xf1, 0x1c, 0x80, 0x01, 0x97, 0x90, - 0x7f, 0x5a, 0x98, 0x70, 0x30, 0x61, 
0x77, 0xe5, 0xd4, 0x3b, 0x03, 0x42, - 0x57, 0x31, 0x5e, 0xc6, 0x64, 0xe1, 0xf4, 0x64, 0x77, 0x21, 0x9b, 0x44, - 0x1c, 0xd9, 0x8c, 0x95, 0x8a, 0xf1, 0xcb, 0x82, 0xac, 0xc1, 0x26, 0x31, - 0xf2, 0x22, 0x41, 0xab, 0xbb, 0x23, 0xd3, 0x8d, 0xcc, 0x5c, 0x9d, 0x9b, - 0x1d, 0x9c, 0x4d, 0xf3, 0x62, 0xde, 0x15, 0x6a, 0x94, 0x8d, 0x24, 0xe7, - 0x52, 0x8d, 0x2a, 0xa4, 0x1d, 0x54, 0x5a, 0xda, 0xaf, 0xab, 0x05, 0x27, - 0x4b, 0xbb, 0xb4, 0xda, 0x0c, 0xb9, 0x20, 0xb3, 0xaf, 0x4a, 0xeb, 0x37, - 0xe5, 0x43, 0xe4, 0xc1, 0xf6, 0x9e, 0xf8, 0x6c, 0xd8, 0xa1, 0x0c, 0xf9, - 0xd1, 0x4b, 0x96, 0xa0, 0x6d, 0x38, 0x64, 0x41, 0xd3, 0x14, 0xfb, 0xad, - 0x89, 0xa9, 0xf7, 0x36, 0x01, 0x0f, 0xbe, 0x8e, 0xd7, 0x76, 0xc6, 0x70, - 0x22, 0x32, 0x8b, 0x08, 0xca, 0x95, 0xbf, 0xcf, 0x5e, 0xb8, 0xc0, 0x3f, - 0xd9, 0xaa, 0x84, 0xab, 0x30, 0x5b, 0xe3, 0x7a, 0x61, 0x32, 0xe5, 0x54, - 0x01, 0x5e, 0xb6, 0x1c, 0x9c, 0x78, 0x52, 0x2a, 0xa7, 0xf5, 0x29, 0xa6, - 0x0f, 0x14, 0xa5, 0x3a, 0x34, 0xd4, 0xf5, 0xc2, 0xb2, 0x8d, 0x12, 0x7b, - 0x8a, 0x64, 0x00, 0xfd, 0x02, 0x0e, 0x02, 0x26, 0x5a, 0xb9, 0xeb, 0xfd, - 0x30, 0xce, 0x51, 0xec, 0x5f, 0xbc, 0xee, 0x53, 0x21, 0xec, 0x0e, 0xee, - 0xc4, 0x28, 0x1a, 0xec, 0x2a, 0x39, 0x4e, 0xe1, 0x50, 0x11, 0x3f, 0x16, - 0xdd, 0xbf, 0xaf, 0x3e, 0xbe, 0xd4, 0xfe, 0x34, 0x1e, 0x62, 0x3f, 0x5a, - 0xea, 0x05, 0xfc, 0xd5, 0x45, 0x08, 0x47, 0xce, 0x38, 0x3f, 0x75, 0x7e, - 0x0c, 0x3a, 0x2a, 0x14, 0xa7, 0x61, 0xba, 0x3a, 0xa1, 0x41, 0xa2, 0x72, - 0x19, 0xfa, 0x33, 0x43, 0xa7, 0xf4, 0x4e, 0x5b, 0xf9, 0xb1, 0x45, 0x16, - 0x57, 0x8e, 0xb1, 0xad, 0x7d, 0x88, 0xd3, 0x93, 0xa2, 0x08, 0xf3, 0x96, - 0x4d, 0x84, 0x63, 0x08, 0xfa, 0x9d, 0xf3, 0x04, 0x33, 0xbd, 0x7e, 0x7a, - 0xc7, 0x63, 0xc5, 0x31, 0x5a, 0x82, 0x33, 0x90, 0x56, 0x44, 0xe9, 0xd3, - 0xc4, 0xd4, 0x76, 0x29, 0x2f, 0xdb, 0xa3, 0x9d, 0xff, 0xd4, 0xd2, 0xb1, - 0xce, 0xf1, 0xcb, 0x7f, 0x10, 0x3b, 0x90, 0xa4, 0x1b, 0xa0, 0x9b, 0xa7, - 0xfa, 0x27, 0x40, 0x11, 0x35, 0xc9, 0x7f, 0x01, 0x97, 0x76, 0x9f, 0x33, - 0xc5, 0xd6, 0x8d, 0x20, 0x07, 0x73, 0x93, 0x0b, 0x24, 0x88, 0x4e, 0x73, - 0x68, 0x79, 0x92, 0x20, 0x2a, 0x71, 0xed, 0x22, 0x0b, 0xfb, 0x42, 0xb5, - 0xd9, 0xc3, 0xaa, 0xed, 0x45, 0x03, 0x64, 0xde, 0x6f, 0x25, 0x8e, 0x3b, - 0x9a, 0xef, 0xc5, 0x63, 0xc2, 0x7f, 0x34, 0xd0, 0x1b, 0x20, 0xa3, 0xab, - 0x9d, 0x54, 0x41, 0x0e, 0x7b, 0x2e, 0x96, 0x12, 0x75, 0x58, 0xdf, 0xd5, - 0xaa, 0x3c, 0xf2, 0x26, 0xc1, 0xf1, 0x18, 0x37, 0x56, 0xf2, 0xd2, 0x86, - 0x6f, 0xd4, 0x9f, 0x57, 0x2b, 0x32, 0xe9, 0x08, 0x94, 0x53, 0x40, 0xc5, - 0x4d, 0x77, 0x39, 0xc6, 0x4c, 0x63, 0x53, 0xf9, 0xbf, 0x35, 0x08, 0xc5, - 0x0d, 0xd0, 0x89, 0x82, 0xa7, 0x2d, 0x6a, 0xb4, 0x22, 0xb1, 0x10, 0x7f, - 0xcf, 0x2e, 0x21, 0x27, 0x9c, 0x12, 0xc6, 0x0e, 0xca, 0xd2, 0x32, 0xb1, - 0x6d, 0xfd, 0x59, 0x12, 0x23, 0x60, 0x46, 0x89, 0xe0, 0x75, 0x5e, 0xc9, - 0xf4, 0x3d, 0x8a, 0x89, 0xd4, 0x23, 0xc2, 0xbe, 0x30, 0x32, 0x4a, 0x95, - 0x42, 0xe2, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, - 0xa7, 0x0b, 0x48, 0xe2, 0xeb, 0xd7, 0x12, 0x42, 0x4c, 0x71, 0xfb, 0x25, - 0x17, 0x23, 0x0e, 0x01, 0xa6, 0x21, 0xb9, 0x17, 0x6e, 0xf0, 0x24, 0x66, - 0x9e, 0x9d, 0x0f, 0x71, 0xf8, 0x5b, 0x79, 0xb0, 0x1b, 0x1f, 0xe7, 0xa2, - 0xc0, 0x17, 0x16, 0x08, 0x5e, 0x24, 0x7b, 0xf9, 0x7a, 0x1e, 0x70, 0xe2, - 0x05, 0x40, 0x16, 0x56, 0xe7, 0x79, 0xf2, 0x30, 0xa3, 0xdc, 0xe3, 0x7a, - 0x7e, 0x22, 0x88, 0xc0, 0xf7, 0xc8, 0x5c, 0x93, 0x95, 0x86, 0x02, 0x6c, - 0x73, 0x76, 0xef, 0x03, 0x2d, 0xcb, 0xa5, 0x22, 0xfe, 0x05, 0xbb, 0xe6, - 0xfd, 0x19, 0x8c, 0x8b, 0x67, 0x58, 0x81, 0x81, 0x2d, 0x36, 0xd0, 0xc1, - 0x20, 0xb2, 0x87, 0x87, 0xdb, 0xe4, 
0xe5, 0xd1, 0xd1, 0xd5, 0x81, 0x34, - 0x4c, 0xd6, 0x09, 0xa2, 0x5d, 0xcc, 0x99, 0x12, 0xa5, 0x06, 0x0f, 0x06, - 0x7e, 0xbb, 0x67, 0x26, 0x69, 0x15, 0x6e, 0x5f, 0xb1, 0x8e, 0xd6, 0x34, - 0xfc, 0x4d, 0xd9, 0x03, 0xb7, 0x5a, 0xf4, 0xaa, 0x03, 0x00, 0x88, 0x6b, - 0x5a, 0xc9, 0xf2, 0xfb, 0x67, 0x72, 0xbc, 0xf7, 0xb9, 0xdc, 0x97, 0xdf, - 0x80, 0x91, 0xfa, 0x30, 0x18, 0x02, 0x89, 0xc7, 0xc9, 0x62, 0x1d, 0xc0, - 0x0b, 0xa6, 0xfe, 0x7e, 0xb9, 0xa9, 0x1f, 0x11, 0x71, 0xe1, 0xd1, 0xfe, - 0x8d, 0x90, 0x2c, 0x09, 0x82, 0x2e, 0x36, 0x79, 0xa5, 0x75, 0x54, 0xfb, - 0xd3, 0x3c, 0xb4, 0x18, 0x2f, 0x4e, 0x3f, 0x37, 0xc4, 0xf8, 0xc5, 0x59, - 0xa3, 0xfd, 0x0c, 0x62, 0x9e, 0xa8, 0x7a, 0x56, 0xc5, 0x97, 0x89, 0x35, - 0xc7, 0xb0, 0x29, 0x87, 0xbf, 0x6a, 0xdc, 0xb1, 0x2f, 0x01, 0xf4, 0x0d, - 0x7c, 0x25, 0x95, 0x39, 0x81, 0xdd, 0x1a, 0x81, 0x36, 0xc0, 0x6b, 0xbf, - 0x6b, 0x4d, 0xea, 0x23, 0xc0, 0x3e, 0x5c, 0x39, 0xe5, 0x6b, 0x59, 0xa0, - 0x50, 0x02, 0x99, 0xdf, 0x4e, 0xe3, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0x17, 0x88, 0xf8, 0xda, 0x3d, 0x57, 0x83, 0x63, - 0x76, 0xa0, 0x5c, 0x13, 0x1a, 0x00, 0x64, 0x30, 0x19, 0xfd, 0x2e, 0x9c, - 0x64, 0xb6, 0xda, 0x51, 0x7b, 0x55, 0xe8, 0xc4, 0x67, 0x1b, 0xda, 0xfc, - 0x4c, 0xd0, 0x27, 0x58, 0x56, 0xa1, 0x52, 0xd2, 0xb8, 0xd8, 0xd5, 0x94, - 0x69, 0xcf, 0xd0, 0xd5, 0x72, 0xeb, 0x2b, 0x05, 0xf3, 0x12, 0xa6, 0xac, - 0xa6, 0xf7, 0x90, 0x24, 0x1f, 0x22, 0x97, 0x5e, 0x8b, 0x7c, 0x2c, 0x30, - 0x61, 0x11, 0x9b, 0xdf, 0x83, 0x2b, 0x10, 0x09, 0x42, 0x77, 0x2b, 0xd9, - 0x43, 0xb3, 0x27, 0x69, 0x75, 0xf2, 0x2e, 0x72, 0xed, 0x50, 0xea, 0xbf, - 0x7f, 0x47, 0x39, 0x9c, 0xf8, 0x1e, 0xce, 0x6f, 0xdd, 0xe8, 0x40, 0xc5, - 0x14, 0x01, 0x7e, 0xbb, 0x0f, 0x43, 0x2d, 0x36, 0x70, 0x54, 0xc6, 0xbe, - 0x69, 0x24, 0xd1, 0x65, 0x49, 0x77, 0xf0, 0xd2, 0x99, 0xb4, 0x50, 0x8d, - 0x98, 0xcb, 0xbf, 0x7a, 0x7c, 0x65, 0xd3, 0x46, 0xcf, 0x90, 0x69, 0x56, - 0x15, 0xa2, 0xae, 0x11, 0x94, 0x60, 0xf9, 0x45, 0x17, 0x54, 0x6b, 0xbd, - 0xeb, 0xd8, 0x74, 0x41, 0x5c, 0xf6, 0x49, 0x0a, 0x14, 0xce, 0x43, 0x1f, - 0x67, 0xc3, 0x6c, 0xf4, 0x01, 0xce, 0x3f, 0x85, 0xed, 0x19, 0xa1, 0xf7, - 0x1b, 0xf8, 0x46, 0x45, 0xb4, 0xe9, 0xa7, 0x1f, 0x2a, 0x65, 0x00, 0x2a, - 0xd3, 0x8b, 0x6a, 0x3b, 0xac, 0x78, 0xab, 0xf4, 0xc8, 0x62, 0x76, 0xc8, - 0x24, 0xf8, 0xf8, 0x08, 0xe0, 0x64, 0x00, 0x64, 0x74, 0x9e, 0x55, 0x2e, - 0xf8, 0xc9, 0xc8, 0x58, 0x0e, 0x1f, 0x27, 0x32, 0xfd, 0x30, 0x24, 0x68, - 0xc8, 0xa4, 0x8c, 0x1c, 0xf3, 0xa7, 0x32, 0xae, 0x84, 0x0a, 0x8a, 0x1e, - 0x11, 0xce, 0xb2, 0x02, 0xf1, 0xb3, 0x5f, 0x7d, 0x5e, 0x54, 0x8c, 0xe0, - 0xeb, 0x46, 0x6e, 0x8a, 0x5f, 0x3f, 0x71, 0x47, 0x2a, 0x8a, 0xe6, 0xf0, - 0xb0, 0x04, 0x49, 0x64, 0xb3, 0x7e, 0x16, 0x09, 0x83, 0x5f, 0x12, 0xe0, - 0x85, 0xb7, 0x36, 0xc0, 0x8a, 0xa5, 0xcd, 0xae, 0xc0, 0xb4, 0xa2, 0x62, - 0x9b, 0xfa, 0x64, 0x18, 0x16, 0x8e, 0xb6, 0x50, 0xf2, 0x9b, 0xc4, 0x7d, - 0x0c, 0x4c, 0x8b, 0x58, 0xcf, 0x9b, 0x87, 0x09, 0xb1, 0x37, 0xbb, 0xaf, - 0xa7, 0x72, 0x79, 0x81, 0x09, 0x55, 0xa1, 0x6a, 0x87, 0xb0, 0x7d, 0xc8, - 0xb0, 0xc1, 0xa4, 0xa9, 0xdf, 0xcf, 0x95, 0x77, 0x36, 0x8e, 0x2b, 0xae, - 0xeb, 0x4b, 0xf9, 0x2a, 0x83, 0x6c, 0x53, 0x3c, 0x89, 0xa6, 0x08, 0xae, - 0x00, 0x4e, 0xb8, 0xf6, 0x34, 0x7c, 0xc6, 0x76, 0x87, 0x1a, 0x02, 0xb0, - 0x89, 0xa3, 0x0f, 0x00, 0xc6, 0x7b, 0xeb, 0xf7, 0x95, 0x40, 0xc5, 0x0d, - 0x6f, 0x74, 0xd8, 0x21, 0x2f, 0x9f, 0x24, 0xac, 0x43, 0xdb, 0x3a, 0x39, - 0x6c, 0x34, 0x59, 0x62, 0x66, 0xbc, 0x28, 0x7f, 0x8c, 0x64, 0x62, 0x8c, - 0x28, 0x6c, 0xf5, 0x79, 0x24, 0xb1, 0x00, 0x9c, 0x58, 0x6b, 0x09, 0xef, - 0xb0, 0x73, 0xcd, 0x47, 0xbb, 0x52, 
0xfd, 0x26, 0x6a, 0xff, 0xb9, 0xf1, - 0xd5, 0x82, 0x59, 0x01, 0xfa, 0x87, 0x14, 0x24, 0x10, 0xb0, 0xf7, 0xdf, - 0xf9, 0x3f, 0x67, 0x19, 0xbd, 0xc7, 0x85, 0xb0, 0xad, 0x47, 0xa8, 0x4c, - 0x3e, 0xb6, 0x2e, 0x8a, 0xb3, 0xcc, 0x35, 0xa0, 0x48, 0xc7, 0x90, 0x81, - 0xb7, 0x53, 0x1c, 0x38, 0x63, 0xf2, 0x2f, 0xa0, 0x71, 0x82, 0xe2, 0x56, - 0xdb, 0x68, 0xe8, 0x5f, 0xf8, 0x42, 0xf2, 0xf6, 0xb8, 0x10, 0x6b, 0x54, - 0x21, 0xa0, 0xc1, 0xfe, 0xcb, 0xce, 0x12, 0xa2, 0x49, 0x51, 0x86, 0x53, - 0x56, 0xec, 0x33, 0xb3, 0x72, 0xce, 0xa4, 0x46, 0xe3, 0x37, 0xcb, 0xc0, - 0x95, 0xaa, 0xe2, 0xa3, 0xc5, 0xe9, 0x36, 0x40, 0xfe, 0xf7, 0xe2, 0x5a, - 0x6d, 0x58, 0x39, 0xb2, 0x41, 0x5d, 0xe2, 0x71, 0x72, 0xd0, 0xf0, 0x5c, - 0x16, 0x88, 0x95, 0x30, 0x0a, 0xfb, 0x8d, 0xda, 0x14, 0x80, 0xf4, 0x15, - 0xf2, 0xf6, 0xac, 0xf3, 0xd8, 0x8d, 0x13, 0x24, 0x2c, 0x74, 0x60, 0x6e, - 0x8c, 0xa1, 0x59, 0xcf, 0x74, 0x7c, 0x2d, 0x0b, 0xbb, 0x06, 0x5c, 0x9d, - 0xcd, 0xf3, 0x1e, 0x4a, 0xba, 0x3f, 0x9c, 0x4a, 0xc4, 0xd7, 0xf9, 0xf0, - 0xa5, 0x56, 0x7f, 0xb0, 0xa2, 0x57, 0xd0, 0xc3, 0xaa, 0xa7, 0xd0, 0x49, - 0xe2, 0x28, 0x9b, 0xc4, 0x64, 0x0c, 0xe0, 0x71, 0x9c, 0x05, 0x04, 0x95, - 0x00, 0x1f, 0x7b, 0xa9, 0xb9, 0xb3, 0x2b, 0x8f, 0x0b, 0x45, 0x1e, 0x23, - 0xaa, 0x27, 0x89, 0x4a, 0xb0, 0x7d, 0x03, 0xdf, 0xae, 0xdb, 0xcb, 0xc4, - 0xec, 0x3b, 0x02, 0xe2, 0x85, 0x3a, 0xb7, 0x25, 0xfb, 0xab, 0xca, 0xc1, - 0x33, 0x00, 0x5b, 0xd2, 0xcf, 0xb0, 0x11, 0x1d, 0x51, 0xb5, 0x5b, 0xea, - 0x94, 0xf7, 0xa0, 0x98, 0x33, 0xba, 0x58, 0xfc, 0x12, 0xea, 0xdd, 0x89, - 0xbd, 0x63, 0x03, 0xbe, 0x7e, 0x3b, 0x69, 0xc4, 0x9d, 0x57, 0x0f, 0xd6, - 0xbe, 0xea, 0x5b, 0xd0, 0x97, 0x63, 0x89, 0xb0, 0xa0, 0xc0, 0xd6, 0x39, - 0xc1, 0x69, 0x12, 0x6a, 0xfb, 0xac, 0x74, 0x7f, 0xfb, 0xf4, 0x7f, 0x38, - 0x44, 0x4c, 0x8a, 0xa2, 0x41, 0x15, 0xc0, 0x54, 0xc0, 0xed, 0x14, 0x83, - 0xef, 0xbc, 0x9c, 0xc7, 0xdd, 0x21, 0xd6, 0xf0, 0x9b, 0x7f, 0x09, 0xd5, - 0x96, 0xe5, 0xf7, 0xc5, 0xa9, 0xb3, 0x41, 0xb0, 0x9d, 0xeb, 0x49, 0x68, - 0x9d, 0x2b, 0xea, 0x47, 0x80, 0x3b, 0x54, 0xb8, 0xf4, 0x14, 0x5e, 0xd6, - 0x66, 0x89, 0x04, 0xb3, 0x00, 0xa3, 0xa8, 0x32, 0x62, 0x2e, 0xc3, 0x15, - 0xc6, 0x93, 0x7d, 0x40, 0x32, 0xb1, 0x6b, 0x60, 0xd3, 0x52, 0xdf, 0x09, - 0x8c, 0x80, 0x2b, 0x01, 0xe7, 0x97, 0x8d, 0xbb, 0x14, 0xd6, 0x10, 0x15, - 0x64, 0x00, 0x4a, 0x2c, 0x67, 0xca, 0xd0, 0xa1, 0x37, 0x33, 0x7b, 0xa1, - 0x2a, 0x5b, 0x5b, 0x78, 0xf8, 0x2f, 0xdd, 0x76, 0xab, 0x8a, 0xc3, 0xe3, - 0x37, 0x00, 0xd1, 0x29, 0xb0, 0x96, 0x1d, 0x18, 0xbe, 0x5d, 0x32, 0x7e, - 0xb7, 0x11, 0xa9, 0x78, 0x72, 0xa2, 0x2d, 0x29, 0x1c, 0x32, 0xa4, 0xff, - 0xc7, 0xce, 0xfe, 0xaf, 0xb7, 0x17, 0x43, 0xe5, 0x2f, 0xae, 0x45, 0xd3, - 0xaf, 0x10, 0xe3, 0xd0, 0x58, 0xb6, 0xee, 0xee, 0x7a, 0xb5, 0x06, 0x70, - 0x26, 0x7e, 0x2d, 0x5b, 0xd5, 0xe1, 0x7b, 0x9a, 0x37, 0x02, 0xfc, 0x1d, - 0x08, 0x4f, 0x1a, 0xf5, 0x44, 0x63, 0xde, 0x4b, 0x14, 0x68, 0x54, 0x0b, - 0x6a, 0x22, 0x4e, 0x02, 0x65, 0xcd, 0xf4, 0x04, 0xec, 0xcc, 0x8a, 0x0b, - 0xe0, 0x59, 0xf8, 0x65, 0x25, 0x63, 0xed, 0x0f, 0xa6, 0xc5, 0x3c, 0xcb, - 0x5d, 0xc5, 0xd8, 0x9f, 0x5a, 0xd3, 0x88, 0x3d, 0xd4, 0x2c, 0xb3, 0x04, - 0xf6, 0x97, 0xc7, 0xe2, 0xfd, 0xb6, 0xf4, 0x7d, 0x0d, 0xb9, 0x75, 0x7e, - 0x9d, 0x81, 0xdc, 0xdf, 0x8e, 0x90, 0x40, 0x0c, 0x7b, 0x45, 0xfe, 0x68, - 0xfd, 0xff, 0x1c, 0xf1, 0x16, 0x09, 0x33, 0x74, 0x27, 0x7b, 0x4d, 0xd9, - 0x9b, 0x48, 0x6d, 0x84, 0xeb, 0x96, 0x8f, 0x4b, 0x82, 0x73, 0xd5, 0x69, - 0x7d, 0x14, 0x45, 0x8c, 0xb8, 0x71, 0x87, 0x70, 0x09, 0x26, 0xfc, 0x89, - 0x6f, 0x0f, 0xb6, 0xc1, 0xd6, 0xe1, 0xbf, 0xdb, 0x85, 0x8f, 0x94, 0xad, - 0x94, 0x01, 0x01, 0xbb, 0x3f, 0xc0, 
0xb5, 0xff, 0xf5, 0xbb, 0x4f, 0x50, - 0x09, 0xca, 0x7d, 0x36, 0x47, 0x66, 0x9a, 0x8c, 0xee, 0x84, 0x73, 0x9a, - 0x1f, 0x49, 0x75, 0xb4, 0xab, 0x66, 0xf7, 0x3b, 0xfe, 0x81, 0x67, 0xc9, - 0xd1, 0x16, 0xde, 0x1f, 0xc2, 0x24, 0xed, 0x6a, 0x5a, 0xe7, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0xc5, 0xd7, 0x14, 0x84, - 0xf8, 0xcf, 0x9b, 0xf4, 0xb7, 0x6f, 0x47, 0x90, 0x47, 0x30, 0x80, 0x4b, - 0x9e, 0x32, 0x25, 0xa9, 0xf1, 0x33, 0xb5, 0xde, 0xa1, 0x68, 0xf4, 0xe2, - 0x85, 0x1f, 0x07, 0x2f, 0xcc, 0x00, 0xfc, 0xaa, 0x7c, 0xa6, 0x20, 0x61, - 0x71, 0x7a, 0x48, 0xe5, 0x2e, 0x29, 0xa3, 0xfa, 0x37, 0x9a, 0x95, 0x3f, - 0xaa, 0x68, 0x93, 0xe3, 0x2e, 0xc5, 0xa2, 0x7b, 0x94, 0x5e, 0x60, 0x5f, - 0x10, 0x85, 0xf3, 0x23, 0x2d, 0x42, 0x4c, 0x13, 0x29, 0xc8, 0x8d, 0x78, - 0x6e, 0xd6, 0x8c, 0xe6, 0xfc, 0xb6, 0x2a, 0xa6, 0x3b, 0xf9, 0xab, 0x61, - 0x7c, 0x08, 0x8a, 0x3b, 0x70, 0xbe, 0x57, 0xaa, 0xda, 0x1f, 0x33, 0x4a, - 0x70, 0x17, 0x25, 0x0d, 0x3f, 0x60, 0x3d, 0xc8, 0x2e, 0xbd, 0x3b, 0x12, - 0x0b, 0x63, 0x5e, 0x3f, 0xf5, 0x6b, 0x1f, 0x0b, 0xd9, 0x33, 0x85, 0x23, - 0x71, 0x24, 0x9a, 0xb3, 0xdf, 0x5c, 0x1f, 0xef, 0x14, 0x33, 0xc8, 0x66, - 0x85, 0xb7, 0xf0, 0x56, 0x68, 0x1d, 0x51, 0x52, 0xaf, 0x80, 0x3c, 0xe2, - 0x59, 0x06, 0xf1, 0xd1, 0x9f, 0xb6, 0xc6, 0x80, 0x4e, 0x06, 0xea, 0x28, - 0xab, 0x17, 0x8f, 0x45, 0x7a, 0xf6, 0xb4, 0x93, 0xb7, 0x43, 0x9e, 0xc6, - 0xd4, 0x29, 0x00, 0x62, 0xab, 0x51, 0x7a, 0x72, 0xe5, 0xc1, 0xd4, 0x10, - 0xcd, 0xd6, 0x17, 0x54, 0xe4, 0x20, 0x84, 0x50, 0xe4, 0xf9, 0x00, 0x13, - 0xfd, 0xa6, 0x9f, 0xef, 0x19, 0xd4, 0x60, 0x2a, 0x42, 0x07, 0xcd, 0xd5, - 0xa1, 0x01, 0x6d, 0x07, 0x01, 0x32, 0x61, 0x3c, 0x65, 0x9a, 0x8f, 0x5d, - 0x33, 0xf3, 0xcb, 0x29, 0x0b, 0x8c, 0xe7, 0x3b, 0x83, 0x44, 0xb1, 0x3a, - 0x4f, 0x8e, 0x09, 0x15, 0x14, 0x69, 0x84, 0xa1, 0xbb, 0x15, 0xfd, 0xea, - 0xde, 0xbe, 0x5b, 0x6a, 0xc0, 0x95, 0x04, 0x46, 0x4d, 0x8a, 0xaa, 0xac, - 0xbc, 0x2f, 0xad, 0x12, 0x15, 0x8a, 0x53, 0x4c, 0x94, 0xb8, 0xca, 0x42, - 0x96, 0x3a, 0xf4, 0x7a, 0x18, 0x9d, 0x5b, 0x24, 0x9a, 0xce, 0xa8, 0x99, - 0xd4, 0x37, 0x32, 0xf6, 0xf2, 0xac, 0xaf, 0x3f, 0xf5, 0x3b, 0xfe, 0xda, - 0x13, 0x9a, 0xab, 0x4f, 0x55, 0xc0, 0x2c, 0x21, 0x2b, 0x65, 0x71, 0x1f, - 0xc5, 0x04, 0x32, 0xc9, 0x94, 0xe5, 0xfa, 0x6f, 0xd8, 0x2a, 0xbc, 0x70, - 0x85, 0x55, 0xdc, 0x62, 0xb7, 0x3a, 0x20, 0x0e, 0xe7, 0x67, 0x3c, 0xfe, - 0xcb, 0x83, 0x6a, 0x15, 0x6e, 0x4a, 0x35, 0x65, 0xea, 0xc1, 0xb9, 0x4d, - 0x35, 0xf9, 0x4b, 0xcf, 0xd8, 0xfd, 0xa5, 0xff, 0xff, 0x67, 0x70, 0x04, - 0xae, 0xa2, 0xa4, 0x12, 0x4b, 0x83, 0x4f, 0xc2, 0x96, 0xf0, 0x21, 0x2b, - 0x14, 0x21, 0x73, 0x42, 0x14, 0x99, 0x07, 0xe5, 0xa9, 0x52, 0x4c, 0xeb, - 0xbe, 0xc3, 0x11, 0x2e, 0x27, 0xda, 0x69, 0x94, 0xd5, 0xf6, 0xc6, 0x77, - 0x0a, 0x00, 0x5d, 0x9a, 0x82, 0xaa, 0x21, 0xfc, 0x86, 0x9b, 0xd0, 0xc4, - 0xc4, 0x1f, 0x53, 0x41, 0x7a, 0x92, 0xab, 0x1c, 0x12, 0xf6, 0xd5, 0x48, - 0xfb, 0x29, 0x4d, 0xb4, 0xd2, 0x12, 0xee, 0xc5, 0xea, 0x18, 0x33, 0xf1, - 0x4d, 0x0a, 0x10, 0x43, 0xa5, 0x35, 0xb1, 0x63, 0xc4, 0xfb, 0x38, 0x1e, - 0xef, 0xac, 0x3f, 0x97, 0x41, 0xc6, 0x96, 0x3e, 0x60, 0x13, 0xc8, 0xe3, - 0xbe, 0x61, 0xe9, 0xb6, 0x26, 0x16, 0x14, 0xf8, 0x82, 0x0d, 0x6e, 0x75, - 0x2f, 0xd7, 0x9c, 0x3a, 0x4a, 0xda, 0xd8, 0x2b, 0x35, 0xd4, 0x20, 0x32, - 0xd4, 0x4f, 0x0f, 0xe4, 0xdc, 0xd5, 0x0f, 0xfe, 0xa6, 0x81, 0x28, 0xb4, - 0x24, 0x3e, 0xb7, 0x0f, 0xb0, 0xb2, 0x5b, 0x05, 0x76, 0xbb, 0x24, 0x49, - 0x6a, 0x01, 0x68, 0x3f, 0x03, 0x96, 0xbc, 0x0c, 0x77, 0x48, 0x5f, 0xe8, - 0x39, 0xf4, 0xb0, 0x84, 0x42, 0x0e, 0x6a, 0xb9, 0xab, 0xf2, 0x95, 0x97, - 0xa7, 0x5e, 0x29, 0x34, 0x9d, 0x50, 
0xc0, 0x4b, 0x40, 0x72, 0xa1, 0x7c, - 0x79, 0x5e, 0x95, 0xbe, 0xd6, 0x17, 0x43, 0x0a, 0xc9, 0x27, 0x25, 0x43, - 0xd7, 0x99, 0xd5, 0x48, 0xd8, 0x98, 0xb5, 0x2b, 0x7f, 0xe3, 0xbd, 0x1d, - 0xc0, 0xd1, 0x04, 0xd5, 0xa4, 0xe1, 0x68, 0xbe, 0x96, 0xf1, 0x2e, 0x5e, - 0x37, 0x8d, 0x39, 0x4e, 0xe4, 0xcc, 0x5e, 0xd7, 0xdd, 0x59, 0x7e, 0xe8, - 0xae, 0x48, 0xb5, 0xec, 0x2c, 0xf7, 0x68, 0x96, 0x00, 0xe5, 0xec, 0x03, - 0x6f, 0x98, 0x3a, 0x9a, 0x4f, 0xd9, 0xf1, 0x2f, 0xfe, 0x76, 0xcf, 0x8f, - 0x0b, 0x3d, 0x8a, 0x14, 0x00, 0x83, 0xcb, 0xca, 0xe3, 0x34, 0x81, 0xb5, - 0x91, 0x64, 0x2b, 0x12, 0x24, 0x86, 0x9c, 0xae, 0x3c, 0x7f, 0x53, 0x22, - 0xd4, 0x94, 0x90, 0x44, 0x6b, 0x35, 0xd2, 0xce, 0x8e, 0x95, 0xe2, 0xbe, - 0x46, 0x50, 0x3f, 0x3d, 0xc3, 0xcd, 0xef, 0x47, 0x99, 0xb5, 0xf2, 0xd4, - 0x6f, 0xf4, 0xfa, 0xa2, 0xfc, 0x1e, 0xe3, 0x99, 0x49, 0xfd, 0x1a, 0x6e, - 0x0d, 0xb5, 0xf1, 0xc8, 0x05, 0x22, 0x29, 0xca, 0x03, 0xb8, 0x15, 0x3b, - 0x01, 0x8a, 0x95, 0x74, 0x48, 0x93, 0x61, 0x35, 0xde, 0xeb, 0xa9, 0xc4, - 0x56, 0xa9, 0xd7, 0xde, 0x4b, 0xe5, 0x4b, 0xa1, 0x42, 0x6a, 0x5f, 0xe3, - 0xb2, 0xc7, 0xda, 0xfb, 0xc7, 0x70, 0x64, 0xe0, 0x68, 0x19, 0xc6, 0x11, - 0x77, 0x2b, 0x5f, 0xba, 0x1d, 0x58, 0x77, 0x98, 0x2c, 0x91, 0xb4, 0xd2, - 0xea, 0x1b, 0xdc, 0xe8, 0xfa, 0x82, 0xf3, 0x6e, 0xac, 0x88, 0x15, 0x16, - 0x1a, 0x53, 0xb3, 0x01, 0x94, 0x03, 0x47, 0x20, 0xdb, 0x71, 0xcb, 0x71, - 0xe8, 0x62, 0xad, 0x34, 0x2b, 0xa3, 0xa5, 0xe9, 0xa6, 0x82, 0x0e, 0x16, - 0x61, 0xbc, 0x29, 0x6b, 0xb1, 0x60, 0x67, 0x80, 0x9a, 0x9f, 0xc4, 0x82, - 0xf6, 0xb0, 0x7a, 0x16, 0x9c, 0x25, 0x04, 0xeb, 0xfd, 0xe0, 0x18, 0xd3, - 0xfc, 0xeb, 0xe1, 0x3c, 0x2b, 0x29, 0x7b, 0x32, 0x4e, 0xd3, 0x6d, 0xe1, - 0x27, 0xda, 0xc9, 0x14, 0x5c, 0x7f, 0xfa, 0x70, 0x41, 0x8e, 0xb4, 0xa3, - 0xde, 0x36, 0x92, 0x67, 0x97, 0xe2, 0xec, 0x85, 0x8b, 0x76, 0x08, 0x3c, - 0x32, 0x58, 0xd4, 0x7f, 0x6f, 0x91, 0x03, 0xdb, 0x19, 0x3e, 0xc4, 0x8b, - 0x3c, 0xb7, 0x75, 0x90, 0x71, 0x7a, 0x21, 0x9d, 0xa7, 0x77, 0xbf, 0xf5, - 0x92, 0x57, 0x46, 0x07, 0xa7, 0xbb, 0x0c, 0x42, 0xca, 0x4f, 0x5a, 0x27, - 0x45, 0x69, 0xfe, 0x6d, 0x78, 0x43, 0x77, 0xc4, 0xb4, 0x43, 0xff, 0x37, - 0x0d, 0xb7, 0xfa, 0xe9, 0x9e, 0x06, 0x70, 0x53, 0xfd, 0xf6, 0xa0, 0x28, - 0x84, 0x46, 0xcd, 0x61, 0xa2, 0x95, 0xc4, 0x1e, 0x6a, 0x13, 0xa1, 0x7f, - 0xaf, 0xe1, 0x73, 0x85, 0xb0, 0x53, 0x9c, 0x08, 0xb6, 0x1d, 0x4d, 0xb4, - 0x0b, 0xfb, 0x1f, 0x0c, 0x7b, 0x17, 0x06, 0x73, 0xa7, 0x22, 0x1f, 0xb0, - 0xd8, 0x45, 0x6e, 0xe5, 0xde, 0x48, 0xb7, 0x9f, 0x5a, 0xa8, 0xd1, 0xc3, - 0x04, 0xd1, 0x87, 0xec, 0x15, 0x3e, 0xd1, 0xc7, 0x57, 0x01, 0x46, 0x4b, - 0x28, 0xa8, 0x79, 0x5a, 0x7e, 0x0b, 0x56, 0x56, 0x28, 0xda, 0x35, 0xea, - 0x4c, 0x14, 0x81, 0xae, 0xc0, 0x0d, 0x12, 0xfe, 0x2d, 0xb7, 0x95, 0x4d, - 0xea, 0x78, 0xb6, 0x53, 0xcf, 0xac, 0x8a, 0xfc, 0xc9, 0x07, 0x9f, 0x93, - 0xf0, 0x11, 0x86, 0x13, 0xe9, 0xca, 0x3d, 0xce, 0xb1, 0xfd, 0x1a, 0x0a, - 0x8b, 0x11, 0x82, 0x94, 0x6a, 0xae, 0xc5, 0x80, 0x6a, 0x3b, 0xa8, 0x7c, - 0xb4, 0x53, 0x4e, 0xa9, 0x04, 0x1a, 0x4f, 0xb0, 0xb9, 0x95, 0x96, 0xa5, - 0xfd, 0xce, 0xdc, 0x57, 0x00, 0x48, 0x16, 0xe2, 0x40, 0xae, 0x04, 0xf5, - 0x83, 0x60, 0x23, 0xd9, 0x8e, 0x59, 0x56, 0x20, 0x50, 0x38, 0xc4, 0xde, - 0x88, 0x9f, 0x91, 0x06, 0xdb, 0x8f, 0x84, 0xa2, 0xaf, 0x61, 0xdd, 0x48, - 0x03, 0x4f, 0xc4, 0xb8, 0xed, 0x12, 0xd2, 0x74, 0x08, 0xb9, 0x51, 0x63, - 0xb5, 0xfe, 0x09, 0x7f, 0x7b, 0x8c, 0x5e, 0xd7, 0x27, 0xe5, 0x79, 0xe6, - 0x33, 0x60, 0x54, 0xe1, 0x21, 0xda, 0xca, 0x8b, 0x81, 0xdf, 0xb6, 0xa7, - 0x2e, 0x9d, 0x0f, 0xfc, 0x05, 0x80, 0x67, 0xcb, 0xc5, 0xdf, 0xc7, 0x13, - 0xee, 0xb5, 0x40, 0x8e, 0xa7, 0x0c, 
0xcb, 0xf2, 0x45, 0x15, 0x29, 0xb1, - 0xb8, 0x02, 0x23, 0x61, 0x38, 0xf1, 0x16, 0xa1, 0x0c, 0xa1, 0xc9, 0x40, - 0x8c, 0xd0, 0x48, 0x4b, 0xce, 0x9c, 0x1e, 0x53, 0x40, 0x44, 0xf6, 0x17, - 0x16, 0xc6, 0x5c, 0xb0, 0x2a, 0x29, 0x59, 0x87, 0x67, 0x85, 0xa7, 0x81, - 0x84, 0xe9, 0x4f, 0xe5, 0x4e, 0x13, 0x5a, 0x11, 0xa1, 0x24, 0x62, 0xe9, - 0x7a, 0xea, 0x51, 0xaa, 0x45, 0xf3, 0x1d, 0x2a, 0xaf, 0x01, 0x28, 0x35, - 0xda, 0xb4, 0xe7, 0xab, 0xc1, 0xb9, 0x3c, 0x45, 0xa2, 0x0b, 0x5d, 0x40, - 0x09, 0xac, 0x62, 0x16, 0xd3, 0x1f, 0x9f, 0xc7, 0x1a, 0x56, 0xb7, 0x27, - 0xd1, 0x1b, 0xe1, 0xb5, 0x82, 0x9e, 0xe8, 0xd3, 0x5c, 0x0f, 0xe8, 0x87, - 0x61, 0xc6, 0x20, 0xb7, 0x31, 0x3f, 0x0d, 0xb3, 0x0a, 0x5a, 0xce, 0x06, - 0xa5, 0xe9, 0xfd, 0xf3, 0x29, 0x1a, 0xcd, 0x86, 0x0e, 0x31, 0x29, 0xaa, - 0xb7, 0x32, 0xf1, 0x10, 0x4e, 0x92, 0x12, 0x00, 0xc0, 0xac, 0x50, 0x4b, - 0x52, 0x59, 0x51, 0x7c, 0xa8, 0x0c, 0xf7, 0xcb, 0x16, 0x73, 0x7b, 0x90, - 0xa8, 0x57, 0x79, 0xb4, 0x73, 0x53, 0xd7, 0xed, 0xba, 0x46, 0xc5, 0x06, - 0x53, 0x02, 0xc7, 0x58, 0x4c, 0x09, 0x0c, 0xa5, 0x01, 0x13, 0x18, 0x39, - 0x4b, 0x4e, 0xc2, 0x0d, 0xd6, 0xdf, 0xaa, 0x7e, 0x46, 0xba, 0x6e, 0xcc, - 0x25, 0x42, 0xd0, 0xb3, 0x31, 0xdc, 0xdf, 0x7d, 0xf1, 0xc3, 0x73, 0xca, - 0x7a, 0xf6, 0xcb, 0x23, 0x81, 0x8d, 0xbe, 0x0b, 0xf2, 0x79, 0x8d, 0x14, - 0xa4, 0xc8, 0x36, 0x18, 0x49, 0xc8, 0x0d, 0xd7, 0xc9, 0xdd, 0x35, 0xeb, - 0xec, 0x52, 0x56, 0xae, 0xf2, 0xd2, 0x51, 0x91, 0x39, 0xbc, 0xb0, 0x49, - 0xb7, 0xf2, 0x1b, 0x64, 0x83, 0x5a, 0xa6, 0x97, 0xc2, 0x15, 0x95, 0xdc, - 0x11, 0xd2, 0x89, 0xc0, 0x6a, 0xb1, 0x44, 0x43, 0x38, 0xb6, 0x54, 0x0f, - 0xdc, 0xcb, 0xed, 0x26, 0x27, 0xd9, 0x46, 0x56, 0x4e, 0x6a, 0x54, 0x74, - 0x0f, 0x45, 0xfc, 0xb6, 0x93, 0xab, 0x3c, 0xd1, 0x86, 0x51, 0xaf, 0xa9, - 0x4a, 0xc0, 0x9c, 0x78, 0xc1, 0xb1, 0xc7, 0xf1, 0x9c, 0xd1, 0xd0, 0x32, - 0x4e, 0x4b, 0x02, 0x36, 0x68, 0x38, 0x88, 0x56, 0xc0, 0x2b, 0x12, 0x05, - 0x3b, 0xb9, 0xf6, 0xa2, 0x37, 0xe7, 0xbc, 0x81, 0xf9, 0x75, 0x51, 0x27, - 0x56, 0x0d, 0x55, 0xd1, 0x6a, 0xe0, 0xcf, 0x87, 0x0a, 0x44, 0xc6, 0x57, - 0xe1, 0x1b, 0xc0, 0x2c, 0xcf, 0xab, 0x77, 0xe9, 0x14, 0xf5, 0x34, 0x89, - 0xfb, 0xc9, 0xf2, 0x87, 0x5c, 0x75, 0xba, 0x51, 0x9a, 0x49, 0xe9, 0x23, - 0x23, 0xf4, 0xc9, 0xd1, 0x2f, 0x87, 0xf6, 0x75, 0x38, 0x97, 0x48, 0xb8, - 0x30, 0x46, 0x1d, 0x46, 0x65, 0x03, 0x10, 0xcf, 0xfb, 0x36, 0xf2, 0xb1, - 0xaf, 0x31, 0x02, 0x7b, 0x74, 0xfe, 0x9f, 0x8c, 0x73, 0x04, 0xfd, 0xb5, - 0xae, 0x2e, 0x27, 0x9c, 0xd8, 0x73, 0xbc, 0xc3, 0x4a, 0x76, 0x93, 0x66, - 0xf6, 0xb7, 0x90, 0xc4, 0x42, 0x3d, 0xcd, 0xb5, 0xf1, 0x75, 0xbf, 0xb7, - 0xdd, 0x8e, 0xb7, 0xcd, 0x90, 0x35, 0xf5, 0x95, 0x3d, 0xe4, 0x4e, 0xb0, - 0x7c, 0x5f, 0xad, 0xff, 0x75, 0x38, 0xc4, 0xc7, 0xed, 0xec, 0x70, 0xcc, - 0x9f, 0xf9, 0x77, 0xa1, 0x00, 0x2f, 0xf1, 0xa2, 0xc9, 0x74, 0xdc, 0x18, - 0x14, 0xd0, 0x2f, 0x86, 0x66, 0xa7, 0x5b, 0x39, 0x5c, 0xba, 0x0e, 0x77, - 0x16, 0x04, 0xc3, 0x02, 0x42, 0x3b, 0x66, 0x29, 0xee, 0x65, 0x00, 0xd4, - 0x22, 0x5a, 0x77, 0x74, 0xd4, 0xc3, 0xf3, 0x00, 0xdf, 0x6b, 0xc3, 0x15, - 0x89, 0x0e, 0xb1, 0xbc, 0xac, 0xe8, 0x44, 0x2f, 0x80, 0x34, 0x34, 0x8b, - 0x0c, 0x48, 0x45, 0xc2, 0x6a, 0xa3, 0x67, 0xd7, 0x3d, 0x36, 0xf3, 0x3f, - 0xe5, 0xf0, 0x5b, 0xe8, 0xad, 0x41, 0xd5, 0x82, 0xc1, 0x28, 0xab, 0x77, - 0xe8, 0x7f, 0xb3, 0xf6, 0xd2, 0x0c, 0xe4, 0x03, 0xcf, 0xe4, 0x72, 0xdb, - 0x7b, 0x81, 0xf4, 0xf3, 0x48, 0x74, 0xe1, 0x91, 0xb8, 0xf8, 0x4c, 0x2c, - 0x60, 0x99, 0x3e, 0x1e, 0x4f, 0xaf, 0x12, 0xab, 0x52, 0xef, 0xc7, 0x60, - 0xd2, 0xfe, 0x62, 0x55, 0xc8, 0x18, 0xad, 0x60, 0xa7, 0x5d, 0xde, 0x4d, - 0xfc, 0x6d, 0xe1, 0x10, 0x7c, 0xf9, 
0xa2, 0x64, 0x00, 0x16, 0x1f, 0x44, - 0x7c, 0xe2, 0x72, 0x37, 0xd9, 0x92, 0xad, 0xfc, 0x62, 0x53, 0xbe, 0xb6, - 0xe0, 0xc8, 0xe0, 0xa2, 0xef, 0x22, 0x4b, 0x70, 0x3a, 0x4f, 0xc9, 0xed, - 0x6b, 0xbc, 0x17, 0x0a, 0xcf, 0x6a, 0x2c, 0xd3, 0xd2, 0x6b, 0x02, 0x45, - 0xfa, 0x9e, 0xc2, 0x21, 0x28, 0xfc, 0x07, 0x68, 0xd6, 0xb8, 0x9f, 0x2a, - 0x0b, 0x7a, 0x0e, 0xbc, 0x4e, 0xee, 0x84, 0x38, 0xe4, 0x8e, 0x70, 0xc3, - 0xc4, 0xad, 0x74, 0x87, 0x2d, 0x16, 0x4f, 0xa1, 0xf8, 0x20, 0xf5, 0xde, - 0xa3, 0xc5, 0x0c, 0x3b, 0xde, 0x44, 0x48, 0x0f, 0x3c, 0xdc, 0x7e, 0x10, - 0x8b, 0x87, 0xc4, 0x3b, 0xb0, 0x95, 0xbf, 0x61, 0x1e, 0xad, 0x07, 0x52, - 0xfd, 0x0b, 0x84, 0xa9, 0x46, 0xb0, 0x32, 0xd5, 0x22, 0x80, 0x35, 0x26, - 0x41, 0xf8, 0x11, 0x72, 0xb1, 0x31, 0x6f, 0x5a, 0x75, 0xcc, 0x67, 0xe0, - 0xb2, 0x50, 0x89, 0xb2, 0x66, 0x6e, 0xee, 0xa0, 0x41, 0x8d, 0x00, 0x2a, - 0xa7, 0x9d, 0xa5, 0x11, 0x2b, 0x07, 0x95, 0x3a, 0x55, 0x8c, 0x67, 0xb1, - 0xe5, 0x2d, 0xd4, 0xd1, 0x3e, 0x29, 0xed, 0xa5, 0x59, 0x97, 0x7b, 0xdf, - 0x92, 0x10, 0x0b, 0x04, 0x89, 0x27, 0xa0, 0xa2, 0x93, 0x18, 0x7f, 0x47, - 0x84, 0x1c, 0xc6, 0xd6, 0x8f, 0x73, 0x81, 0xa0, 0xfa, 0xe5, 0x3e, 0xd8, - 0xbf, 0x56, 0x1a, 0x76, 0xf4, 0xc4, 0x0f, 0x7a, 0x29, 0x9d, 0x32, 0x5d, - 0x41, 0xe0, 0x07, 0xb9, 0xd3, 0x3f, 0x7e, 0xff, 0x90, 0x89, 0xce, 0xdc, - 0xf1, 0x1d, 0x54, 0xb6, 0x67, 0x7f, 0x4d, 0x71, 0x9a, 0x4a, 0x5f, 0x80, - 0x0d, 0x5c, 0x77, 0xd5, 0x50, 0x7c, 0x41, 0x56, 0x7e, 0x99, 0x0a, 0xeb, - 0x66, 0x1f, 0xd2, 0x55, 0xc3, 0xc6, 0x6c, 0xc5, 0xfc, 0x34, 0x40, 0x2c, - 0x05, 0x29, 0x05, 0x7c, 0xca, 0xe6, 0x8d, 0xd3, 0xb0, 0xca, 0x84, 0x27, - 0x50, 0x7c, 0x6b, 0x17, 0x1b, 0x22, 0xe4, 0x7f, 0xe6, 0x44, 0x94, 0x06, - 0x4b, 0xb3, 0xb7, 0xbb, 0x98, 0x81, 0x44, 0x0b, 0xf5, 0x66, 0xcb, 0xad, - 0xf2, 0x9a, 0xe1, 0x47, 0xf3, 0x97, 0xa9, 0xb2, 0xc2, 0xca, 0xcd, 0x98, - 0x78, 0x60, 0xdc, 0x6e, 0x87, 0x55, 0x47, 0xf3, 0xae, 0x84, 0xdd, 0x9a, - 0xe9, 0x1a, 0x63, 0x83, 0xea, 0x23, 0x09, 0x67, 0x34, 0x83, 0x00, 0x6e, - 0x5e, 0x58, 0xb8, 0x89, 0x04, 0x08, 0x0a, 0x55, 0x9e, 0x78, 0xc9, 0xff, - 0xb9, 0xb5, 0x2c, 0xdd, 0x3b, 0x0c, 0x58, 0x07, 0x8b, 0xb4, 0x6a, 0xc4, - 0x64, 0xa3, 0x5e, 0x5b, 0xfe, 0x4d, 0xd0, 0x74, 0x01, 0x1b, 0xdf, 0x10, - 0x45, 0x2b, 0xd6, 0x9e, 0xa9, 0x60, 0x1f, 0xad, 0x46, 0xa1, 0x8c, 0xf8, - 0xf6, 0xa9, 0x8a, 0x27, 0xea, 0x51, 0x37, 0x84, 0xcf, 0xe5, 0xd7, 0x51, - 0xd6, 0x40, 0x39, 0x39, 0x5f, 0xf6, 0x96, 0x33, 0xd9, 0x86, 0x8d, 0x38, - 0xb6, 0x26, 0x04, 0x14, 0x07, 0x46, 0x3e, 0xd0, 0xc5, 0xf6, 0x0d, 0xa0, - 0x47, 0x2b, 0xc8, 0x73, 0x18, 0x6b, 0xd3, 0x0e, 0x18, 0xcc, 0x43, 0x98, - 0xd0, 0xcf, 0x1c, 0xe4, 0x4a, 0x41, 0x6a, 0x56, 0x2d, 0xf0, 0x93, 0x89, - 0x81, 0x6c, 0xce, 0x04, 0x1a, 0x23, 0x05, 0x91, 0x4f, 0x48, 0x44, 0x3a, - 0xaa, 0x03, 0xa5, 0x4a, 0xa9, 0x20, 0x2c, 0xbe, 0x6a, 0x81, 0xe6, 0xa9, - 0xf8, 0xf0, 0x2b, 0x29, 0xa1, 0xe0, 0xc4, 0xce, 0xf5, 0xda, 0x25, 0x70, - 0x49, 0xcc, 0xa0, 0x4b, 0x24, 0x49, 0x4f, 0x11, 0xc4, 0x3b, 0x22, 0x89, - 0x9a, 0xb4, 0xf4, 0xcd, 0xa3, 0xee, 0xb0, 0x76, 0x13, 0xc4, 0xbb, 0xaf, - 0x03, 0x7f, 0x27, 0xf3, 0x38, 0xbc, 0xde, 0x7c, 0x0c, 0x39, 0x14, 0xb7, - 0x14, 0xbb, 0x5c, 0xae, 0x89, 0xf8, 0xf7, 0xd6, 0x00, 0x78, 0xf4, 0xb0, - 0x52, 0x16, 0xf5, 0x54, 0xc5, 0x93, 0xf7, 0x6d, 0x0d, 0xe8, 0x58, 0xe2, - 0xa1, 0xa7, 0xdc, 0x49, 0xdb, 0xc8, 0x79, 0xbc, 0xc3, 0x97, 0x7b, 0x6c, - 0x82, 0x7b, 0xbe, 0xe9, 0x79, 0xac, 0x4a, 0xa4, 0x7c, 0x49, 0x83, 0x58, - 0x3a, 0xe4, 0xf5, 0x68, 0x5c, 0xb7, 0x7f, 0x2d, 0xfe, 0x6b, 0x96, 0xc7, - 0x8b, 0x67, 0xb5, 0xd0, 0xa1, 0x0a, 0x16, 0x62, 0x64, 0x53, 0xea, 0x29, - 0x80, 0x93, 0xf9, 0xd6, 0xa0, 0xc5, 
0x1b, 0x3a, 0x1e, 0xab, 0x51, 0x88, - 0xe0, 0x9e, 0xd4, 0xf6, 0xbf, 0x70, 0x2d, 0x29, 0x2e, 0x08, 0xa9, 0x31, - 0x78, 0x0a, 0x15, 0x30, 0x9f, 0x2e, 0xc8, 0x41, 0x65, 0x8e, 0x97, 0x51, - 0x5e, 0x73, 0x46, 0x42, 0x74, 0x84, 0xfd, 0x9b, 0x4a, 0x8a, 0x68, 0x28, - 0x45, 0xd0, 0x5d, 0x65, 0x08, 0xb3, 0xf5, 0x40, 0x8a, 0x29, 0x8e, 0x70, - 0x02, 0x49, 0x6a, 0x01, 0xd6, 0x41, 0x4a, 0xf8, 0x15, 0xa3, 0x70, 0x59, - 0xe9, 0xa2, 0xe2, 0x76, 0x8c, 0x60, 0x33, 0xb3, 0xfa, 0x8b, 0xb4, 0x90, - 0x6f, 0x92, 0xc8, 0x21, 0x59, 0xc0, 0x3a, 0x30, 0x46, 0xeb, 0x49, 0xd8, - 0x85, 0x63, 0x5a, 0x23, 0x87, 0xe1, 0xa7, 0xc0, 0x1a, 0xb0, 0xc7, 0xc4, - 0x40, 0x4d, 0x11, 0x9c, 0xe3, 0xd4, 0x6b, 0xef, 0x68, 0xc8, 0x2c, 0x31, - 0xcd, 0x3e, 0xee, 0x55, 0x10, 0x67, 0x77, 0x7b, 0x30, 0xc1, 0xd0, 0x23, - 0x6c, 0x65, 0x6f, 0xfb, 0x2e, 0x62, 0x33, 0x42, 0x63, 0xdc, 0xca, 0x86, - 0xf1, 0x0e, 0xb3, 0xb0, 0x69, 0x11, 0x65, 0xe1, 0x6e, 0x6c, 0x03, 0x49, - 0x79, 0xe8, 0xf1, 0x2e, 0x8d, 0x94, 0xc8, 0xa8, 0x98, 0x2d, 0x3f, 0xfe, - 0xbd, 0x2d, 0x75, 0x45, 0xd1, 0x7a, 0x09, 0xf8, 0x90, 0x49, 0xbd, 0x4a, - 0x3b, 0xa4, 0xa3, 0x26, 0xb8, 0x62, 0x66, 0x97, 0xd9, 0xc1, 0xca, 0x12, - 0x49, 0xe1, 0x27, 0x93, 0x4f, 0x60, 0xfa, 0xb3, 0x4f, 0x4c, 0xdb, 0x87, - 0x6c, 0x3b, 0x50, 0x47, 0xe2, 0xd8, 0x5b, 0x13, 0x99, 0xf0, 0x2b, 0xbb, - 0x32, 0x33, 0xfd, 0x7d, 0x15, 0x0f, 0x2c, 0xee, 0x85, 0x83, 0xc0, 0x53, - 0x79, 0x3e, 0x51, 0xfe, 0x7c, 0x06, 0x73, 0x49, 0x49, 0x4f, 0x5a, 0x22, - 0x36, 0x8f, 0x30, 0x8a, 0xef, 0x84, 0xd6, 0x15, 0x26, 0x48, 0xe7, 0x1e, - 0xb1, 0xaa, 0x82, 0xd0, 0xc7, 0x0b, 0x97, 0x7b, 0x6c, 0x2d, 0x49, 0x7e, - 0x6d, 0xe7, 0xa3, 0x05, 0x80, 0xd7, 0x42, 0xa9, 0xc6, 0x66, 0x98, 0x30, - 0xe3, 0x8a, 0x79, 0x86, 0x9c, 0x2b, 0xbc, 0x4a, 0xe6, 0x0d, 0xc5, 0xe5, - 0x1a, 0x92, 0xd9, 0xef, 0x63, 0x52, 0x03, 0x88, 0x36, 0xc5, 0x83, 0x65, - 0xf8, 0xf1, 0x87, 0xce, 0x43, 0xfe, 0x89, 0x58, 0x07, 0x6a, 0xad, 0x85, - 0x37, 0x0f, 0xdf, 0x9e, 0xa5, 0x62, 0xa9, 0xd2, 0x41, 0x3f, 0x7f, 0xb7, - 0xf1, 0xe2, 0x58, 0xb5, 0xda, 0xdf, 0xd1, 0xba, 0x36, 0x2c, 0xe7, 0x43, - 0x31, 0x07, 0xc5, 0xf5, 0x79, 0xc9, 0x31, 0xd7, 0x1d, 0x97, 0x57, 0x9a, - 0x8e, 0x3f, 0xac, 0x00, 0x49, 0x00, 0x2f, 0xad, 0xac, 0xe7, 0x65, 0x7c, - 0xbf, 0xec, 0x85, 0x57, 0xe6, 0xcc, 0x07, 0x34, 0x02, 0x36, 0xa8, 0x6a, - 0x9f, 0x3a, 0x9a, 0x2f, 0x34, 0x93, 0x1f, 0x7d, 0x38, 0x54, 0xe3, 0x54, - 0x54, 0xee, 0x84, 0x55, 0xe1, 0x0d, 0xc1, 0x08, 0x3e, 0x33, 0x9e, 0x2a, - 0xc3, 0x6a, 0x83, 0xc4, 0x75, 0xed, 0xbc, 0x5f, 0xd9, 0x04, 0xd7, 0x77, - 0x91, 0xb1, 0xa0, 0xf2, 0xef, 0x81, 0xb0, 0x8b, 0x53, 0x5f, 0x71, 0xec, - 0xa5, 0x0b, 0xbe, 0xf2, 0x92, 0x7e, 0x0a, 0x34, 0xeb, 0x5d, 0x65, 0xc7, - 0xa9, 0x44, 0x10, 0xfb, 0xd3, 0xef, 0xe1, 0xbc, 0x06, 0x65, 0x68, 0x22, - 0xfb, 0x43, 0x2c, 0xcf, 0x8e, 0x6a, 0x28, 0xdb, 0x0b, 0xf4, 0xaf, 0x01, - 0x65, 0x97, 0xd6, 0xe5, 0x91, 0x20, 0x13, 0x2c, 0xb1, 0xc2, 0xd3, 0xc3, - 0x76, 0x90, 0xf8, 0xcd, 0x00, 0xde, 0x93, 0xf8, 0x4e, 0xcc, 0xdc, 0xca, - 0x9a, 0xf0, 0xbd, 0x9b, 0xd6, 0x57, 0xb1, 0x13, 0xd9, 0xe0, 0xe1, 0x9e, - 0x21, 0x74, 0xa9, 0x76, 0xc0, 0x0c, 0xad, 0x4f, 0x5d, 0xfe, 0x23, 0x32, - 0x5a, 0x10, 0x75, 0x5b, 0x05, 0xdf, 0xdc, 0x5b, 0x94, 0xcb, 0xe1, 0x9f, - 0x13, 0x51, 0xf5, 0x50, 0x36, 0x3b, 0xf2, 0x90, 0x9c, 0x9a, 0xc8, 0x10, - 0x88, 0xa9, 0xec, 0x22, 0x1e, 0x96, 0x70, 0xe8, 0x9e, 0x69, 0xc1, 0x22, - 0xd9, 0x14, 0x15, 0x2e, 0xbc, 0x03, 0x96, 0x9e, 0x1d, 0x00, 0x10, 0x16, - 0x4f, 0x56, 0xf0, 0x29, 0x47, 0x0a, 0x45, 0x34, 0x27, 0x21, 0x3b, 0x67, - 0x33, 0xf9, 0xdd, 0x29, 0x3a, 0xf2, 0xe4, 0x56, 0x34, 0x46, 0xbe, 0xd8, - 0x42, 0x29, 0x11, 0x7f, 0x30, 0xc1, 
0xbe, 0xa5, 0xc8, 0x9d, 0x7b, 0x2e, - 0x4e, 0xcf, 0xba, 0x91, 0xb4, 0xbf, 0x0a, 0x04, 0x00, 0x49, 0x83, 0x6b, - 0x46, 0x5f, 0x3b, 0xfa, 0xf7, 0x40, 0x8d, 0x85, 0x47, 0x14, 0x58, 0xb3, - 0xa5, 0x66, 0x30, 0xfd, 0x4a, 0x80, 0xa4, 0x61, 0x3b, 0x7c, 0xb4, 0xcc, - 0x34, 0x8c, 0xc6, 0xb6, 0x10, 0xa9, 0x76, 0xc9, 0x11, 0xd7, 0x8a, 0x51, - 0x86, 0x17, 0x89, 0x28, 0xab, 0xd5, 0x03, 0x88, 0x74, 0x5b, 0x81, 0xbd, - 0x3a, 0x57, 0xfe, 0x66, 0x25, 0xd0, 0x92, 0x15, 0x84, 0x02, 0x0f, 0x51, - 0xa8, 0x58, 0xcf, 0x77, 0x65, 0x10, 0x61, 0xe8, 0xe6, 0xab, 0xb1, 0xba, - 0x3b, 0x08, 0xd6, 0xba, 0x5f, 0xf5, 0x74, 0xc5, 0x07, 0x60, 0xfd, 0xd3, - 0xc8, 0x52, 0x4e, 0xdb, 0xc3, 0xe3, 0x6d, 0x81, 0x20, 0x51, 0x01, 0x9a, - 0x5e, 0x32, 0x4e, 0x80, 0x5a, 0xcb, 0x83, 0xd7, 0xa4, 0xd9, 0xfb, 0xed, - 0x3d, 0x80, 0xa1, 0x83, 0x81, 0x91, 0xc0, 0x0b, 0xff, 0x67, 0xd8, 0x8b, - 0xd0, 0x12, 0x0b, 0xd4, 0x2b, 0x8e, 0x0d, 0x0f, 0xfc, 0xc7, 0xb3, 0xf1, - 0xe3, 0xf3, 0x5e, 0x0c, 0xb6, 0x6b, 0x9d, 0xdc, 0x22, 0x70, 0x31, 0x54, - 0xe8, 0x41, 0xfe, 0xa1, 0xe1, 0x4f, 0xfa, 0x81, 0xfb, 0xae, 0x72, 0x16, - 0xb8, 0x87, 0xc9, 0x31, 0x9d, 0x42, 0x47, 0x4a, 0x20, 0xae, 0x63, 0x16, - 0x0d, 0xfa, 0xf1, 0x27, 0x19, 0x47, 0xee, 0x45, 0x84, 0x29, 0x9a, 0xb6, - 0x42, 0xef, 0xbd, 0x15, 0xa8, 0x34, 0x33, 0x38, 0x9c, 0x9d, 0xbb, 0x5c, - 0x03, 0xf3, 0xcf, 0xcf, 0x6d, 0x2e, 0xd5, 0x88, 0xf8, 0xdd, 0xfc, 0xc0, - 0x4a, 0xdb, 0x69, 0xd9, 0x62, 0x89, 0x24, 0x46, 0xee, 0xa4, 0xb9, 0x95, - 0xe6, 0xaf, 0x7d, 0x53, 0xec, 0x41, 0xae, 0x70, 0xfe, 0x4f, 0x31, 0xe3, - 0xa2, 0x59, 0x2c, 0xa1, 0x53, 0x8b, 0xb6, 0x3b, 0x39, 0xc1, 0xa4, 0xa7, - 0x9e, 0xaa, 0x00, 0x60, 0x9a, 0x5f, 0x56, 0x51, 0xf3, 0x7b, 0x28, 0x84, - 0x36, 0x1a, 0xc1, 0x2d, 0xc8, 0xed, 0xf8, 0x48, 0x48, 0x1d, 0x39, 0x4d, - 0x3d, 0xce, 0x30, 0x90, 0x29, 0x33, 0x6f, 0x9a, 0xce, 0x58, 0xe7, 0x88, - 0xac, 0x59, 0xce, 0x85, 0x5a, 0x52, 0x2b, 0x6c, 0xb7, 0xe9, 0x2e, 0xa9, - 0xd9, 0x9a, 0xea, 0x1c, 0x47, 0xb2, 0x59, 0xff, 0x73, 0x76, 0x21, 0x40, - 0xe1, 0xde, 0x32, 0xb8, 0x73, 0x3d, 0xa5, 0x44, 0x66, 0x79, 0xa1, 0xfe, - 0xaf, 0xf6, 0x8a, 0x97, 0x09, 0x5c, 0x8b, 0x64, 0x38, 0x9f, 0xe1, 0x59, - 0x38, 0x18, 0xe9, 0xc0, 0xd6, 0xa2, 0xac, 0x74, 0xa9, 0xfd, 0x4a, 0x0d, - 0xf6, 0x47, 0x00, 0x2b, 0x09, 0x46, 0x38, 0x1c, 0xa4, 0x9f, 0x63, 0x20, - 0x18, 0x75, 0x5a, 0xb8, 0xc4, 0xbc, 0xd6, 0x6b, 0xc8, 0x14, 0x72, 0x03, - 0xe4, 0x05, 0xd4, 0x4e, 0x66, 0x20, 0x42, 0xa2, 0x8f, 0x96, 0xe7, 0xaf, - 0xd3, 0xfb, 0xa8, 0x88, 0x9b, 0xe3, 0xaa, 0xcd, 0xab, 0xce, 0x8f, 0x07, - 0x6d, 0xef, 0x98, 0xce, 0xdb, 0x42, 0x5b, 0xf4, 0x61, 0x57, 0x62, 0x27, - 0x8a, 0x53, 0x5e, 0xf8, 0x3e, 0xf6, 0x7f, 0xde, 0x5e, 0x3b, 0x1b, 0x13, - 0x2e, 0x30, 0x46, 0x4b, 0x6b, 0xb7, 0xbb, 0x33, 0x31, 0xc0, 0xfa, 0x40, - 0xab, 0x68, 0x72, 0xe3, 0x92, 0x30, 0x47, 0xd6, 0x30, 0x60, 0x42, 0x5b, - 0x88, 0x8d, 0xa6, 0x56, 0xe4, 0xac, 0x33, 0x2e, 0xca, 0x05, 0x1f, 0x60, - 0xaf, 0xde, 0x7f, 0xa9, 0xda, 0x3f, 0xa8, 0x21, 0xf6, 0xfc, 0x98, 0x7d, - 0xc4, 0x1e, 0xb0, 0xa9, 0x56, 0x2d, 0x8d, 0xea, 0x03, 0x51, 0x48, 0xac, - 0xe8, 0x22, 0xc7, 0x8b, 0xef, 0x91, 0x0e, 0xcf, 0x0c, 0xe9, 0x38, 0x43, - 0x99, 0xa8, 0x98, 0x4f, 0xfa, 0xe3, 0x03, 0xa6, 0x4f, 0xd4, 0x0d, 0x98, - 0x5b, 0x50, 0x28, 0xd7, 0xe7, 0x46, 0xd7, 0xad, 0x43, 0xb8, 0x56, 0x2a, - 0x2f, 0x7c, 0x39, 0x67, 0xf4, 0x62, 0x0e, 0xc0, 0xa8, 0x87, 0xb5, 0x81, - 0xe2, 0x13, 0x9f, 0xe4, 0xdd, 0x72, 0xf2, 0x07, 0xca, 0xac, 0x6d, 0xb2, - 0x96, 0x53, 0x5a, 0x8f, 0x66, 0x3c, 0xb4, 0xc1, 0x4f, 0x9a, 0x82, 0x55, - 0xcf, 0x0e, 0x27, 0x5f, 0xc7, 0xd2, 0x28, 0x27, 0x7f, 0x22, 0x6e, 0xa5, - 0xe7, 0x32, 0x56, 0x51, 0x18, 0xe0, 
0x85, 0x6d, 0x1f, 0xfc, 0x25, 0x08, - 0x18, 0x60, 0x57, 0xfc, 0x66, 0x94, 0x2c, 0x4c, 0xbe, 0x00, 0xab, 0x9e, - 0x73, 0x9b, 0x06, 0xd3, 0xb5, 0x24, 0xa8, 0x8f, 0xb1, 0x33, 0x99, 0x4c, - 0xb4, 0x13, 0x07, 0xcd, 0x04, 0xdd, 0x77, 0xdc, 0xee, 0x96, 0x02, 0x59, - 0xe8, 0x22, 0x07, 0x16, 0x2e, 0x41, 0xc9, 0xc4, 0x59, 0x70, 0x37, 0x0f, - 0x14, 0xc9, 0xcf, 0x90, 0x57, 0xc2, 0x0d, 0xa3, 0xd7, 0x66, 0xb6, 0x7d, - 0x10, 0xd4, 0xfc, 0x18, 0x66, 0xad, 0xea, 0x5e, 0x64, 0x6c, 0x12, 0x66, - 0x3d, 0x96, 0xa5, 0xa8, 0x9c, 0x49, 0x5c, 0xd4, 0x8d, 0x1c, 0xc3, 0x38, - 0xfe, 0x53, 0xc2, 0x71, 0xd1, 0xc6, 0x41, 0xe2, 0xb9, 0x17, 0x74, 0x6e, - 0xcc, 0xf8, 0x72, 0x28, 0x38, 0x4e, 0x54, 0x9b, 0x0e, 0xa3, 0x3a, 0x43, - 0x5c, 0xd5, 0x83, 0x06, 0xbb, 0x46, 0x16, 0x6e, 0xe3, 0x8a, 0xd5, 0x1e, - 0x7f, 0x88, 0x62, 0xac, 0x35, 0x89, 0xfb, 0xbe, 0x96, 0x1d, 0x87, 0x37, - 0xb7, 0x91, 0x63, 0xae, 0x77, 0x7b, 0x66, 0x60, 0xc1, 0x3e, 0x80, 0x56, - 0xb1, 0xc8, 0x0d, 0x16, 0xde, 0x38, 0x82, 0x66, 0x99, 0x2b, 0x35, 0xd8, - 0xb4, 0x5b, 0x4b, 0x3e, 0x93, 0x96, 0x59, 0xf8, 0x96, 0x7e, 0x7b, 0x27, - 0xf4, 0x62, 0xb7, 0xda, 0x89, 0xa7, 0x34, 0x47, 0xed, 0xb3, 0x42, 0x20, - 0xeb, 0xcd, 0xf6, 0xa3, 0x9f, 0xf7, 0x48, 0x91, 0x17, 0xd2, 0x21, 0xed, - 0x5a, 0x22, 0x39, 0xc9, 0x76, 0x95, 0x36, 0xd9, 0x97, 0x0f, 0x19, 0xce, - 0xd3, 0xbc, 0x74, 0x7d, 0x53, 0x37, 0x3b, 0x4a, 0x97, 0xb7, 0xf8, 0x7e, - 0xdd, 0x4c, 0x5f, 0xae, 0x5c, 0x0b, 0xab, 0x4c, 0x34, 0xa1, 0x7e, 0x34, - 0x35, 0xf4, 0xfc, 0x92, 0xab, 0x2e, 0x6a, 0x15, 0xce, 0x84, 0xae, 0x70, - 0xae, 0x85, 0x21, 0xe6, 0x41, 0x13, 0x31, 0xe0, 0x8f, 0xab, 0x82, 0xe3, - 0x09, 0xaf, 0xa4, 0x7c, 0xb4, 0xb9, 0xb7, 0xc0, 0x67, 0x08, 0xc9, 0x9d, - 0xcd, 0x0b, 0x3c, 0xa0, 0x0c, 0xde, 0x49, 0x2f, 0x40, 0x19, 0x95, 0x64, - 0xb9, 0x7c, 0x2a, 0x72, 0xdd, 0xa2, 0x92, 0x0a, 0x21, 0xeb, 0x8c, 0xc3, - 0x6d, 0x52, 0xe7, 0x05, 0x50, 0x01, 0x55, 0x19, 0x2f, 0xbd, 0x1b, 0x72, - 0x73, 0xfe, 0x82, 0x9f, 0xbf, 0xa0, 0xfe, 0x19, 0x7c, 0x42, 0x6d, 0x76, - 0x32, 0x47, 0x36, 0x15, 0x2e, 0xde, 0xe8, 0xe6, 0xca, 0x07, 0xa3, 0x6b, - 0x40, 0x99, 0x96, 0xcd, 0x19, 0xea, 0x7e, 0xc9, 0x87, 0x9d, 0x3d, 0xa0, - 0x82, 0x88, 0xe7, 0xe4, 0x34, 0x9f, 0xa5, 0x27, 0xdf, 0xae, 0x03, 0x37, - 0xa8, 0x35, 0x64, 0x02, 0x09, 0x09, 0x9e, 0xec, 0x38, 0x0a, 0xff, 0x79, - 0x8c, 0x9a, 0x87, 0x66, 0xcd, 0xe4, 0xf4, 0x9d, 0xa9, 0x07, 0x96, 0x36, - 0xae, 0x2e, 0x4e, 0xc5, 0xe9, 0x86, 0xb2, 0x8e, 0x71, 0x5d, 0xe8, 0xee, - 0x84, 0xf3, 0x30, 0x2a, 0x58, 0x1a, 0x80, 0xb8, 0xaa, 0xb8, 0x1d, 0xc4, - 0xae, 0x59, 0x91, 0xf3, 0x16, 0x9b, 0xa3, 0x8a, 0xa3, 0x26, 0xb2, 0x0a, - 0xe5, 0x58, 0xb7, 0x96, 0x87, 0xfb, 0x00, 0xe4, 0x50, 0x7c, 0xb1, 0x77, - 0x3a, 0x18, 0xc2, 0xe3, 0xc1, 0x12, 0xa6, 0x0d, 0x06, 0xeb, 0x80, 0x6c, - 0x5a, 0xee, 0x34, 0xcc, 0x1c, 0x87, 0x35, 0x46, 0x1d, 0x05, 0x83, 0xd8, - 0x91, 0x22, 0xaa, 0xf6, 0xad, 0x87, 0xab, 0x76, 0x18, 0x79, 0xe2, 0x09, - 0xc3, 0xa3, 0x15, 0x67, 0x3a, 0x7c, 0x0f, 0xa0, 0x4c, 0x7b, 0xfc, 0xfc, - 0xdd, 0x5c, 0xe4, 0x86, 0x58, 0x13, 0xb8, 0x97, 0xae, 0x8c, 0x75, 0xc8, - 0x02, 0x1e, 0x33, 0x45, 0xa9, 0x54, 0x09, 0x15, 0x53, 0x4f, 0x28, 0x47, - 0x4d, 0x5f, 0xd0, 0xc7, 0x09, 0xbd, 0x93, 0xb0, 0x08, 0x79, 0x05, 0xbc, - 0xbc, 0xaf, 0x2c, 0xbd, 0xbb, 0x21, 0xd1, 0x60, 0xb8, 0x81, 0x4c, 0x6c, - 0x5e, 0x45, 0x39, 0xa3, 0x31, 0x54, 0xb7, 0x82, 0xef, 0x86, 0xe4, 0x5e, - 0xca, 0xd6, 0xb8, 0x31, 0xa2, 0x4c, 0x84, 0x5b, 0xac, 0xe5, 0x29, 0xbf, - 0xbf, 0x89, 0xb4, 0x4c, 0xd3, 0x69, 0x66, 0x50, 0xeb, 0xda, 0x7d, 0x00, - 0xbb, 0x45, 0x0f, 0xe1, 0xd1, 0x30, 0x1a, 0xc6, 0x94, 0x66, 0xdc, 0x01, - 0x75, 0xce, 0xf8, 0xfc, 0xd9, 0xce, 
0xcf, 0x1f, 0x9e, 0x5a, 0x55, 0xa4, - 0x3e, 0xe6, 0x51, 0xc7, 0x74, 0x40, 0x82, 0x09, 0xea, 0xa0, 0xf5, 0xb2, - 0x70, 0x9f, 0x0e, 0xfb, 0x46, 0x8a, 0x69, 0xbf, 0x07, 0x92, 0xdc, 0x74, - 0x03, 0x70, 0xc6, 0x44, 0x81, 0x66, 0x40, 0xc7, 0xf5, 0xb8, 0xf0, 0x45, - 0x0f, 0xca, 0xd8, 0xb0, 0x9e, 0x48, 0x94, 0xff, 0x85, 0xcb, 0x7b, 0xec, - 0x67, 0x5d, 0xfe, 0xe9, 0x13, 0xd1, 0x67, 0x95, 0xd9, 0x35, 0x9e, 0x8a, - 0x53, 0x4d, 0x6b, 0x9d, 0x42, 0x53, 0xb1, 0x6b, 0x51, 0x1e, 0x35, 0x40, - 0x81, 0x92, 0x91, 0x5f, 0x1f, 0x8e, 0xbe, 0x37, 0xd3, 0x85, 0xab, 0x85, - 0x37, 0x1c, 0x0f, 0xae, 0xd9, 0xf7, 0xa2, 0x75, 0x3d, 0xd9, 0xd7, 0x2a, - 0x80, 0xb0, 0x4c, 0x14, 0x04, 0x40, 0xc5, 0xba, 0x0e, 0xbe, 0xab, 0xcc, - 0x38, 0x35, 0x62, 0x6c, 0xa5, 0xce, 0x49, 0x15, 0x2a, 0x10, 0xb5, 0x6a, - 0xd2, 0x3b, 0xd2, 0x6a, 0xad, 0x2e, 0x34, 0x46, 0x8b, 0x78, 0x57, 0x6e, - 0xc4, 0xde, 0x65, 0x68, 0x05, 0x8f, 0xd6, 0x6e, 0x34, 0xb9, 0xaa, 0x80, - 0x77, 0xff, 0x6c, 0x1a, 0x37, 0x87, 0xdd, 0x33, 0x13, 0x33, 0xa7, 0xa9, - 0x3a, 0x90, 0x32, 0x7b, 0x9b, 0x21, 0x31, 0xc8, 0xf5, 0x4c, 0xa6, 0x73, - 0x42, 0x79, 0x46, 0x14, 0x1b, 0xef, 0xf4, 0x78, 0xd9, 0x7e, 0x6f, 0x31, - 0xaa, 0x59, 0x97, 0x34, 0xe5, 0xe6, 0x67, 0xf3, 0x86, 0xf5, 0x61, 0xe7, - 0x51, 0x6d, 0xce, 0xb3, 0xdc, 0x86, 0xc7, 0x55, 0x43, 0xfa, 0x38, 0x78, - 0xb0, 0x8d, 0x03, 0x9c, 0xe4, 0x6c, 0xca, 0x73, 0x94, 0xa1, 0x0c, 0xb8, - 0x11, 0xda, 0x0c, 0x0b, 0x18, 0x1b, 0xd0, 0x99, 0xe7, 0xa9, 0x0d, 0xc3, - 0x36, 0xd7, 0x8c, 0x16, 0xad, 0x16, 0x1f, 0xb2, 0x3c, 0x07, 0x32, 0x11, - 0x6c, 0xd2, 0x8f, 0x33, 0x37, 0x5c, 0x3e, 0x4f, 0x7a, 0x76, 0xf7, 0x85, - 0xcc, 0x68, 0x1a, 0xf9, 0x26, 0x74, 0x42, 0xc9, 0xea, 0x21, 0x7e, 0x74, - 0x3c, 0x4f, 0xde, 0xfb, 0xd7, 0x83, 0x62, 0x12, 0xc7, 0x4f, 0xfc, 0x47, - 0x18, 0x9d, 0xc5, 0xf5, 0xe9, 0xd7, 0xaa, 0x76, 0x20, 0x99, 0x79, 0xae, - 0x9b, 0x7a, 0xde, 0x8b, 0x95, 0xc2, 0xa5, 0xa3, 0x6a, 0x30, 0x9b, 0x99, - 0x63, 0x34, 0x7c, 0xd1, 0x53, 0xa1, 0x6c, 0xd6, 0xed, 0x7d, 0x8c, 0xba, - 0xc8, 0x21, 0xf3, 0xe1, 0x31, 0x55, 0x3d, 0x88, 0x87, 0x04, 0xc7, 0xc9, - 0x65, 0x0c, 0x53, 0x1e, 0xd4, 0xd9, 0xaa, 0xda, 0xc2, 0x14, 0x88, 0xf2, - 0x07, 0x2c, 0x12, 0x4d, 0x79, 0x54, 0xaa, 0xd9, 0x47, 0x95, 0xf9, 0x7e, - 0x26, 0x89, 0x4b, 0x63, 0x7e, 0x44, 0x06, 0x0e, 0xe2, 0x8d, 0x9a, 0x0a, - 0xc3, 0xee, 0x55, 0x13, 0x55, 0x04, 0xcc, 0xb5, 0x2e, 0xa0, 0x0d, 0xec, - 0x76, 0x84, 0xc1, 0x1e, 0xdd, 0xe6, 0xfa, 0x54, 0x6e, 0x38, 0x30, 0x6f, - 0xcc, 0xa4, 0x8d, 0x76, 0x1e, 0xa3, 0x8e, 0x2c, 0x5e, 0x37, 0xeb, 0x0b, - 0xf4, 0xb5, 0x80, 0xde, 0x58, 0x13, 0x5a, 0x52, 0xdc, 0x65, 0x99, 0x1a, - 0x1b, 0x75, 0x0c, 0xbd, 0x83, 0xe8, 0x90, 0x8e, 0xa9, 0xbf, 0x42, 0x22, - 0xe1, 0x3a, 0x31, 0x4e, 0x54, 0xad, 0xd4, 0x6f, 0x80, 0xb4, 0xb5, 0x82, - 0x05, 0x20, 0xd7, 0x38, 0xd7, 0xeb, 0x25, 0x33, 0xe9, 0x4b, 0xc3, 0x5e, - 0xd1, 0x11, 0xb0, 0xd9, 0x8e, 0x90, 0x48, 0x2a, 0xe3, 0xa0, 0x60, 0x16, - 0x70, 0xe3, 0xd1, 0x45, 0x11, 0x64, 0x91, 0x69, 0x87, 0x1c, 0xbb, 0x91, - 0xc4, 0x43, 0x12, 0x62, 0x99, 0x69, 0xe5, 0x96, 0x01, 0x15, 0xdb, 0xdf, - 0x05, 0x55, 0x34, 0xbb, 0xd6, 0x76, 0x89, 0xcd, 0xb5, 0x4f, 0x2e, 0xa7, - 0x6e, 0x15, 0xc9, 0xc0, 0x8e, 0xa8, 0x63, 0x79, 0x12, 0xfb, 0x7e, 0x69, - 0x8f, 0x52, 0x5e, 0xe7, 0x76, 0x16, 0x28, 0x76, 0xca, 0xcb, 0xd8, 0x0e, - 0x4a, 0x93, 0x9d, 0x16, 0x68, 0x98, 0xf8, 0xc3, 0x39, 0xb2, 0x2d, 0xea, - 0xba, 0x72, 0x16, 0x33, 0xb7, 0xec, 0x61, 0x9e, 0x94, 0x32, 0x01, 0x22, - 0xde, 0x66, 0xfd, 0x68, 0xfa, 0xcf, 0xf2, 0x52, 0x4f, 0x02, 0xe8, 0x25, - 0xd3, 0xa3, 0x5b, 0x29, 0xae, 0xe9, 0x62, 0xfa, 0xd6, 0x1a, 0x50, 0x80, - 0x95, 0x96, 0xdf, 0x00, 0xfc, 0x23, 
0xf1, 0x95, 0xef, 0xbb, 0xf5, 0x23, - 0x9d, 0x6b, 0xd6, 0xed, 0xb4, 0xe2, 0x4a, 0xf6, 0xb8, 0x20, 0x83, 0x6b, - 0x45, 0x92, 0x29, 0x5a, 0x02, 0xe9, 0xf7, 0x8e, 0x5c, 0x02, 0xde, 0xb4, - 0x9a, 0xdf, 0x18, 0x10, 0x17, 0x7f, 0xd8, 0x2e, 0x17, 0xc0, 0xf0, 0x6b, - 0x3b, 0x88, 0x09, 0x58, 0xf2, 0x18, 0x22, 0x09, 0x80, 0x4a, 0xe0, 0x51, - 0x6f, 0x7a, 0x70, 0x09, 0x1f, 0xe5, 0xfa, 0xa9, 0x4d, 0x24, 0x1f, 0x18, - 0x1c, 0x74, 0xcd, 0x87, 0x04, 0xfd, 0x85, 0x33, 0x4c, 0x28, 0xbd, 0xa3, - 0x66, 0x6c, 0x99, 0x7e, 0x50, 0x5e, 0xb5, 0x22, 0x33, 0x92, 0xd4, 0xd8, - 0x82, 0x4e, 0x38, 0xbe, 0xcb, 0x3d, 0x5f, 0x19, 0xd1, 0x0f, 0x8b, 0xa1, - 0x78, 0x08, 0x1c, 0x10, 0x0b, 0x77, 0xa7, 0x39, 0x2e, 0x91, 0x83, 0xee, - 0x1d, 0x36, 0xd8, 0x77, 0x87, 0x8a, 0x38, 0x45, 0x3c, 0xbd, 0xb9, 0x88, - 0xbb, 0x1b, 0x20, 0xd1, 0x95, 0xb9, 0x8f, 0x03, 0x46, 0xfa, 0xab, 0x70, - 0x68, 0x26, 0xd9, 0xb1, 0x25, 0x52, 0x5a, 0x77, 0x2d, 0x92, 0xc2, 0x1d, - 0xb6, 0x6e, 0xec, 0x67, 0xef, 0x34, 0xe2, 0x64, 0xb3, 0xa0, 0xae, 0x0c, - 0xd9, 0x36, 0xa1, 0xc7, 0xd8, 0xbf, 0x7a, 0x43, 0xbf, 0xc0, 0xc6, 0x90, - 0x60, 0x6a, 0x23, 0xc0, 0x6a, 0x5d, 0x62, 0x18, 0xac, 0xc1, 0x20, 0x35, - 0x17, 0xba, 0x4e, 0x54, 0xb7, 0xec, 0xd4, 0xad, 0x99, 0x94, 0xa4, 0xda, - 0x57, 0xe7, 0x46, 0xed, 0x47, 0xd1, 0xb4, 0xa2, 0x3e, 0x0f, 0x4a, 0xb6, - 0xa6, 0x68, 0x3e, 0x94, 0xb9, 0x18, 0x30, 0xe0, 0x75, 0x08, 0xe8, 0xf3, - 0x21, 0x79, 0x26, 0x68, 0x6a, 0x65, 0xb6, 0xbe, 0x03, 0x98, 0x8f, 0x04, - 0xad, 0x1e, 0xb0, 0x54, 0xd2, 0x28, 0xdd, 0x4a, 0xe9, 0xf3, 0xa0, 0x06, - 0xbf, 0x0b, 0x2a, 0xee, 0xf8, 0x03, 0x7e, 0x1d, 0x37, 0xc1, 0x32, 0xd1, - 0x41, 0xf4, 0x9b, 0xc5, 0x02, 0x10, 0x6f, 0x55, 0x5a, 0xec, 0x5b, 0xe7, - 0x61, 0x05, 0x17, 0xf0, 0xf8, 0xc6, 0x89, 0xe8, 0xad, 0x32, 0x57, 0x14, - 0xe5, 0xf8, 0xf5, 0x88, 0xd9, 0x73, 0x17, 0x10, 0xa7, 0xc3, 0xf8, 0x78, - 0x0b, 0x66, 0xab, 0x63, 0x4f, 0x96, 0x5d, 0xdf, 0x36, 0x83, 0xc4, 0x6f, - 0x20, 0xbd, 0xcb, 0x4c, 0xd2, 0xfa, 0x35, 0x87, 0xd8, 0xb6, 0xbb, 0xcc, - 0xb6, 0xd2, 0x85, 0x03, 0x6a, 0xea, 0xbb, 0x6d, 0x2f, 0xa2, 0x06, 0xc0, - 0xd6, 0x68, 0xd9, 0x7f, 0xd6, 0xa2, 0x3b, 0x08, 0x6a, 0x98, 0x26, 0x6d, - 0x9a, 0x2b, 0x68, 0x51, 0x78, 0xde, 0xa6, 0x96, 0x50, 0x7b, 0xfc, 0x03, - 0x43, 0xf8, 0x21, 0x01, 0x9d, 0xe2, 0x89, 0x65, 0x47, 0xae, 0x9c, 0x45, - 0x5e, 0xa5, 0xce, 0x97, 0xb3, 0xe6, 0xf6, 0xd4, 0x5a, 0xe8, 0x6b, 0x87, - 0xd6, 0xdf, 0xfb, 0x1f, 0xaf, 0xfb, 0xaf, 0x19, 0xa5, 0xfd, 0xba, 0xe0, - 0x22, 0x2f, 0x91, 0x97, 0xdf, 0xae, 0xe9, 0x39, 0xb1, 0xe4, 0xd3, 0x10, - 0xcb, 0xb3, 0x03, 0xb5, 0x0b, 0xf0, 0xd9, 0x70, 0x1e, 0x9c, 0x63, 0x6f, - 0x3a, 0xcf, 0x3c, 0x1b, 0x86, 0xa3, 0xad, 0x1a, 0xe7, 0x4c, 0x09, 0xd0, - 0x80, 0xf6, 0x8b, 0x72, 0x96, 0x53, 0x7e, 0x66, 0xfb, 0x7c, 0x7c, 0x8a, - 0xb0, 0x60, 0xa6, 0x4c, 0x20, 0xc4, 0x63, 0x69, 0x6a, 0xc3, 0x53, 0xf8, - 0x9a, 0x28, 0x30, 0x9d, 0x6f, 0x0e, 0x1b, 0xb2, 0x2c, 0xe6, 0x94, 0x9f, - 0xfc, 0xc0, 0x8d, 0x71, 0xbe, 0x37, 0xa6, 0xc9, 0xbd, 0x3c, 0x4a, 0xf3, - 0xc4, 0xb3, 0x88, 0x4c, 0x45, 0x26, 0x4e, 0x2f, 0x83, 0x16, 0x70, 0xb6, - 0xc7, 0xb2, 0x36, 0xf0, 0x0c, 0x67, 0xd2, 0x0a, 0xd3, 0xd9, 0x7c, 0x35, - 0x29, 0xac, 0xd4, 0x9c, 0x6d, 0xfc, 0xec, 0x58, 0x92, 0xf0, 0xba, 0x32, - 0x00, 0xae, 0xb1, 0xeb, 0x4d, 0x8c, 0x1a, 0x20, 0xe7, 0x5c, 0xfc, 0x9a, - 0x4d, 0x51, 0x24, 0x7b, 0x52, 0xeb, 0x13, 0x3d, 0xb4, 0xab, 0xda, 0xb3, - 0x74, 0x39, 0xd2, 0xf8, 0x2d, 0xef, 0x9b, 0x0f, 0xae, 0xf5, 0x3c, 0x99, - 0x34, 0xbe, 0x15, 0x5c, 0x9f, 0x5d, 0xae, 0xf4, 0x72, 0xc2, 0xac, 0x06, - 0xbe, 0xad, 0xe4, 0x68, 0xea, 0xd5, 0xa1, 0xdc, 0xdb, 0xf4, 0x61, 0x51, - 0xf5, 0x1a, 0x62, 0x15, 0xfd, 0x00, 
0x51, 0x35, 0x53, 0x6c, 0x39, 0x3e, - 0xdb, 0x60, 0x0a, 0x52, 0xc1, 0x52, 0x3c, 0xd7, 0xab, 0x73, 0xea, 0x1e, - 0x38, 0x38, 0x65, 0x35, 0x35, 0x2b, 0x28, 0x04, 0x5c, 0x82, 0xea, 0x4a, - 0x9e, 0x96, 0x72, 0xa4, 0x8e, 0x42, 0xfd, 0x55, 0xa8, 0x66, 0x7a, 0x40, - 0xc9, 0xf2, 0xc2, 0x1e, 0x5d, 0x09, 0x90, 0x32, 0x18, 0xdb, 0x11, 0x4c, - 0x6c, 0x9c, 0x27, 0x62, 0x0a, 0xe6, 0xc1, 0xdf, 0xf2, 0x6a, 0x8c, 0x26, - 0xb4, 0xfb, 0xda, 0xa9, 0x08, 0x10, 0x3a, 0xf0, 0xe1, 0x64, 0xe5, 0x03, - 0x81, 0x7d, 0x15, 0x74, 0xa1, 0x8d, 0x10, 0xc8, 0xbb, 0x6a, 0x7c, 0x60, - 0xa1, 0x09, 0x35, 0x19, 0x2d, 0x70, 0xb5, 0x36, 0xc8, 0x8b, 0x66, 0x5f, - 0xe0, 0xe7, 0xea, 0x70, 0x2f, 0x5d, 0x3f, 0xae, 0x5e, 0x25, 0x84, 0xdd, - 0x9b, 0x69, 0x44, 0x37, 0x7c, 0x6b, 0x9e, 0x81, 0x18, 0x36, 0x4b, 0xff, - 0x86, 0x44, 0x2a, 0x39, 0x66, 0x7f, 0x71, 0x43, 0xe7, 0x65, 0xfe, 0xfd, - 0x34, 0xb9, 0xd9, 0x5a, 0x00, 0xd1, 0x41, 0x43, 0xc7, 0xbc, 0x65, 0x68, - 0xb7, 0x73, 0xff, 0x19, 0xd3, 0xed, 0x15, 0xa4, 0x67, 0xa1, 0x53, 0x0e, - 0xa6, 0xfb, 0x25, 0xce, 0x9d, 0x5b, 0x73, 0x08, 0xf3, 0x3b, 0x69, 0xe4, - 0x94, 0x9b, 0x94, 0x03, 0xb3, 0x8a, 0x2e, 0x07, 0x0c, 0xef, 0x18, 0x4c, - 0x2b, 0x1c, 0x83, 0x9f, 0x25, 0x20, 0x29, 0x72, 0x11, 0xa0, 0xaa, 0xed, - 0x0c, 0xf9, 0xce, 0x94, 0x0d, 0x7a, 0xb6, 0xb3, 0xa4, 0x57, 0xd6, 0x61, - 0xca, 0x1a, 0x0e, 0x89, 0x6d, 0x99, 0x4d, 0x06, 0xcd, 0x83, 0x7e, 0x09, - 0x14, 0x5b, 0xe7, 0x4c, 0x72, 0xa8, 0x98, 0xc8, 0x27, 0xf3, 0x70, 0x89, - 0x87, 0x11, 0xbb, 0x98, 0x82, 0x77, 0x9d, 0xaa, 0x95, 0x8c, 0xc1, 0xf8, - 0x39, 0x27, 0xd5, 0x64, 0x59, 0x6a, 0x8c, 0xbe, 0xe2, 0xe1, 0xd1, 0x6b, - 0xe3, 0xaf, 0x30, 0x6f, 0xf4, 0x9e, 0x35, 0x0b, 0x10, 0x24, 0x77, 0xd8, - 0xa4, 0x30, 0x2e, 0xf7, 0x97, 0xfd, 0xef, 0x1e, 0x9e, 0xf2, 0xbd, 0xf2, - 0x41, 0x73, 0x19, 0xe6, 0x7b, 0x7f, 0x74, 0x11, 0x91, 0x38, 0xc5, 0xac, - 0xd5, 0xb0, 0x48, 0xc4, 0xe9, 0x41, 0xd4, 0x50, 0x76, 0x13, 0xbf, 0xec, - 0xe8, 0x3a, 0xa8, 0x84, 0x42, 0x98, 0x12, 0x64, 0x95, 0x85, 0x79, 0x29, - 0xea, 0x3a, 0xf9, 0xa4, 0x5c, 0x9c, 0x35, 0x01, 0x68, 0x71, 0xb9, 0x5b, - 0xbe, 0xaa, 0x76, 0x9e, 0x63, 0x1c, 0xc1, 0x83, 0x94, 0xc6, 0x89, 0x2b, - 0x1d, 0x00, 0x43, 0x74, 0x00, 0x41, 0x93, 0x58, 0x52, 0xf9, 0x13, 0xfe, - 0x9f, 0x7a, 0xb7, 0x3d, 0x6b, 0x70, 0x4e, 0x4f, 0x8f, 0xf4, 0x9c, 0xe4, - 0x97, 0x62, 0xaf, 0x69, 0x45, 0xec, 0xf4, 0x53, 0x71, 0xdc, 0xc7, 0x8d, - 0x6f, 0xb2, 0x9d, 0xec, 0x43, 0xdd, 0xc0, 0xe5, 0xd1, 0x6c, 0x1a, 0x82, - 0x19, 0xf6, 0x18, 0xd3, 0x59, 0x0e, 0x07, 0x81, 0x5a, 0x23, 0x10, 0x8b, - 0xaa, 0x0b, 0x99, 0xc8, 0x34, 0xc2, 0xd0, 0xa9, 0x69, 0x7f, 0x54, 0xe3, - 0xc4, 0xa0, 0xe7, 0x4b, 0x31, 0x90, 0xe7, 0x3b, 0x45, 0x9b, 0x7f, 0xae, - 0xd2, 0xab, 0x22, 0xb9, 0xfc, 0x07, 0x39, 0x4b, 0x45, 0x83, 0x8d, 0x41, - 0x7a, 0x52, 0xb2, 0xae, 0x71, 0x78, 0x17, 0x63, 0xfa, 0xbe, 0x59, 0xca, - 0xf0, 0xfd, 0x68, 0xe5, 0xc4, 0x9a, 0x74, 0x3d, 0xec, 0xd4, 0x8b, 0xa1, - 0x2c, 0x31, 0x4d, 0x73, 0xfd, 0x5c, 0x1e, 0xeb, 0x5f, 0xf6, 0x42, 0x0d, - 0x79, 0x5f, 0x64, 0x10, 0xae, 0xb2, 0xf6, 0x9e, 0xa8, 0xab, 0xa5, 0x2b, - 0x9a, 0xcf, 0x25, 0xfa, 0xa2, 0xb3, 0xdc, 0x30, 0x3d, 0x08, 0x4e, 0xbb, - 0x7b, 0x0c, 0x28, 0x34, 0x9d, 0xda, 0xc4, 0x94, 0xa4, 0xf4, 0x1e, 0x78, - 0x8b, 0xa9, 0xd3, 0xa7, 0x1c, 0x2a, 0x27, 0x14, 0xa0, 0x44, 0x1a, 0x9a, - 0x87, 0x72, 0xa5, 0x6d, 0x69, 0x46, 0xe5, 0xc1, 0x4f, 0x29, 0x87, 0xc0, - 0xa7, 0xa8, 0x96, 0xde, 0xa9, 0x63, 0x08, 0xd8, 0x4a, 0xa1, 0x25, 0x43, - 0x76, 0x41, 0xf7, 0x9f, 0x17, 0xe3, 0xe1, 0x4b, 0xc6, 0x2b, 0x79, 0xea, - 0xd5, 0xa7, 0x72, 0x16, 0x0a, 0x8c, 0xcd, 0x49, 0x70, 0x75, 0xd4, 0x59, - 0x4a, 0x19, 0x7b, 0x31, 0x02, 0x7a, 
0x3a, 0x20, 0x15, 0x62, 0x7e, 0x4e, - 0x6f, 0xac, 0xd0, 0xd1, 0x29, 0xbd, 0x2d, 0xa1, 0xc6, 0x3e, 0xa6, 0x1a, - 0x26, 0x18, 0x96, 0x98, 0x12, 0x56, 0x37, 0xbf, 0xb4, 0x91, 0x57, 0xe8, - 0xda, 0x61, 0x7c, 0x2f, 0x3e, 0xd4, 0x51, 0xfe, 0xe8, 0x5b, 0x00, 0x30, - 0x08, 0xf6, 0x4e, 0x69, 0xa8, 0x1a, 0x2b, 0x82, 0x41, 0x85, 0xa9, 0xd9, - 0x3c, 0xc8, 0x02, 0x91, 0x99, 0xd4, 0xa2, 0xfd, 0x9d, 0x1b, 0x08, 0xfc, - 0x41, 0x3e, 0x10, 0x6b, 0x80, 0x74, 0x3d, 0x72, 0x61, 0x97, 0xdd, 0x96, - 0xec, 0xf4, 0xd6, 0x6d, 0x68, 0x02, 0x6e, 0xbb, 0x55, 0x9d, 0x6f, 0x11, - 0xde, 0xd1, 0xad, 0x6d, 0x42, 0x96, 0x2c, 0x42, 0x1e, 0xa9, 0x19, 0x42, - 0x22, 0x38, 0x38, 0x18, 0x3c, 0x4b, 0xc1, 0x9c, 0x0f, 0xe1, 0x34, 0x61, - 0x06, 0x77, 0x54, 0x04, 0xe0, 0x87, 0x94, 0x5c, 0xc9, 0xa1, 0x35, 0x55, - 0x3d, 0x4a, 0xf2, 0x4f, 0x05, 0x11, 0x98, 0x6f, 0x3c, 0x85, 0x84, 0xe6, - 0xf8, 0x71, 0x8a, 0xdf, 0xe9, 0x9a, 0xe3, 0x70, 0xd6, 0x36, 0xd6, 0xc8, - 0x66, 0x3e, 0xba, 0x7c, 0x0a, 0x23, 0x0a, 0xd0, 0xb6, 0x66, 0x68, 0xa8, - 0xdf, 0x37, 0x17, 0xfb, 0xdd, 0x9c, 0x8b, 0xc7, 0x8e, 0xc4, 0x4f, 0x40, - 0x08, 0x23, 0x58, 0x15, 0xa2, 0xba, 0xef, 0xdf, 0x67, 0xcd, 0x1f, 0xb6, - 0xc4, 0xea, 0xce, 0x81, 0x38, 0x58, 0x92, 0x57, 0xcf, 0x83, 0x47, 0x29, - 0x9f, 0xde, 0x9b, 0xde, 0x01, 0xfe, 0x68, 0x91, 0x67, 0x06, 0x9d, 0x31, - 0xd0, 0xb9, 0xc3, 0xbb, 0xc3, 0x6b, 0xa0, 0x04, 0x1e, 0x34, 0xd5, 0x38, - 0xd4, 0xac, 0x70, 0xae, 0xab, 0xb2, 0xbd, 0x4b, 0xa0, 0xad, 0x2b, 0x82, - 0xaf, 0x8c, 0x90, 0x4d, 0xd3, 0xca, 0x71, 0x35, 0x75, 0x89, 0xe5, 0x42, - 0x91, 0x46, 0x8d, 0x18, 0x04, 0x7a, 0xb9, 0xaa, 0x3b, 0xe7, 0x1e, 0x8c, - 0x4e, 0xf9, 0x6e, 0x74, 0xaa, 0x2e, 0x36, 0x86, 0xfb, 0xef, 0x9c, 0xd7, - 0xba, 0x5e, 0x2e, 0x3c, 0x40, 0xce, 0x8b, 0x2b, 0x94, 0x55, 0xf2, 0xd4, - 0x7d, 0xbf, 0x8c, 0x8a, 0xa8, 0x59, 0x84, 0x6f, 0x32, 0x95, 0xc5, 0xcc, - 0xad, 0xee, 0x30, 0x23, 0x7c, 0x54, 0xea, 0x60, 0xb8, 0x88, 0x12, 0x45, - 0x03, 0xbc, 0xe3, 0x92, 0x9f, 0xa8, 0x5b, 0x07, 0x97, 0x53, 0x0d, 0xe1, - 0xe3, 0x3d, 0xdf, 0xf2, 0x2a, 0x12, 0xee, 0xdf, 0x73, 0x8d, 0x41, 0xf4, - 0xe4, 0x2c, 0xb4, 0xd4, 0x9e, 0xfe, 0xf2, 0xe6, 0xa0, 0x9e, 0x2a, 0x3a, - 0x36, 0x26, 0x7e, 0xd9, 0xe1, 0x22, 0xee, 0x0b, 0x5b, 0x48, 0xd2, 0xa9, - 0x55, 0xab, 0x50, 0x7c, 0xf6, 0xc8, 0x56, 0x31, 0xbb, 0x51, 0xe9, 0x31, - 0x4d, 0xaa, 0x13, 0x3a, 0x99, 0x9f, 0x8c, 0x59, 0x6a, 0xc9, 0xf1, 0x0a, - 0x89, 0xcc, 0x39, 0x98, 0xbd, 0xc3, 0x93, 0x97, 0x28, 0xe5, 0x73, 0x94, - 0xf2, 0x0a, 0x7a, 0x09, 0x38, 0x0b, 0xab, 0xd8, 0x49, 0x98, 0x14, 0x34, - 0x32, 0x9d, 0xef, 0x9d, 0x47, 0xdb, 0x82, 0xb9, 0x84, 0xd6, 0xd7, 0x9f, - 0xf7, 0xdf, 0x79, 0x5b, 0xe8, 0x92, 0x44, 0x31, 0x5d, 0x42, 0x80, 0x90, - 0x8d, 0x36, 0xa2, 0x39, 0x02, 0x64, 0x21, 0xa2, 0xb8, 0xfc, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xc8, 0xeb, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0xd8, 0x03, 0x00, 0x00, 0xdc, 0x03, 0x00, 0x00, - 0xe0, 0x03, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0xa8, 0x03, 0x00, 0x00, - 0x50, 0x03, 0x00, 0x00, 0x04, 0x03, 0x00, 0x00, 0xac, 0x02, 0x00, 0x00, - 0x74, 0x02, 0x00, 0x00, 0x2c, 0x02, 0x00, 0x00, 0xf4, 0x01, 0x00, 0x00, - 0xac, 0x01, 0x00, 0x00, 0x74, 0x01, 0x00, 0x00, 0x2c, 0x01, 0x00, 0x00, - 0xe4, 0x00, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00, - 0x28, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x9e, 0xfc, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x33, 0x00, 0x00, 0x00, 0x5e, 0xfd, 0xff, 0xff, 0x1c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x1c, 0x00, 
0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x04, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x01, 0x00, 0x00, 0x00, - 0x33, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, - 0x96, 0xfd, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, - 0x88, 0xfd, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x32, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, - 0x31, 0x00, 0x00, 0x00, 0xca, 0xfd, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x0a, 0x00, 0x00, 0x00, 0x78, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, - 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, 0x00, - 0x05, 0x00, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, - 0x2c, 0x00, 0x00, 0x00, 0x2d, 0x00, 0x00, 0x00, 0x2e, 0x00, 0x00, 0x00, - 0x0e, 0xfe, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, - 0xbc, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x25, 0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, - 0x28, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x52, 0xfe, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x25, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x21, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x96, 0xfe, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x08, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x07, 0x00, 0x00, 0x00, 0x88, 0xfe, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x1d, 0x00, 0x00, 0x00, - 0x1e, 0x00, 0x00, 0x00, 0x1f, 0x00, 0x00, 0x00, 0xca, 0xfe, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x78, 0xfe, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x1d, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x19, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x0e, 0xff, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x08, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x05, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, - 0x16, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x42, 0xff, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xf0, 0xfe, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x15, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x11, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x86, 0xff, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x08, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x78, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 
0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0xba, 0xff, 0xff, 0xff, - 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x68, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0d, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x09, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x18, 0x00, 0x14, 0x00, - 0x10, 0x00, 0x0c, 0x00, 0x0b, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x14, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x0c, 0x00, 0x0b, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x07, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, - 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x04, 0x00, 0x0a, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x34, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, 0x00, - 0x34, 0x10, 0x00, 0x00, 0xd4, 0x0f, 0x00, 0x00, 0x7c, 0x0f, 0x00, 0x00, - 0x38, 0x0f, 0x00, 0x00, 0xdc, 0x0e, 0x00, 0x00, 0x74, 0x0e, 0x00, 0x00, - 0x2c, 0x0e, 0x00, 0x00, 0xe8, 0x0d, 0x00, 0x00, 0x90, 0x0d, 0x00, 0x00, - 0x48, 0x0d, 0x00, 0x00, 0x04, 0x0d, 0x00, 0x00, 0xc0, 0x0c, 0x00, 0x00, - 0x64, 0x0c, 0x00, 0x00, 0x0c, 0x0c, 0x00, 0x00, 0xc4, 0x0b, 0x00, 0x00, - 0x80, 0x0b, 0x00, 0x00, 0x28, 0x0b, 0x00, 0x00, 0xe0, 0x0a, 0x00, 0x00, - 0x9c, 0x0a, 0x00, 0x00, 0x58, 0x0a, 0x00, 0x00, 0xfc, 0x09, 0x00, 0x00, - 0xa4, 0x09, 0x00, 0x00, 0x5c, 0x09, 0x00, 0x00, 0x18, 0x09, 0x00, 0x00, - 0xc0, 0x08, 0x00, 0x00, 0x78, 0x08, 0x00, 0x00, 0x34, 0x08, 0x00, 0x00, - 0xf0, 0x07, 0x00, 0x00, 0x94, 0x07, 0x00, 0x00, 0x3c, 0x07, 0x00, 0x00, - 0xf4, 0x06, 0x00, 0x00, 0xb0, 0x06, 0x00, 0x00, 0x58, 0x06, 0x00, 0x00, - 0x10, 0x06, 0x00, 0x00, 0xcc, 0x05, 0x00, 0x00, 0x88, 0x05, 0x00, 0x00, - 0x2c, 0x05, 0x00, 0x00, 0xd4, 0x04, 0x00, 0x00, 0x8c, 0x04, 0x00, 0x00, - 0x48, 0x04, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0xa8, 0x03, 0x00, 0x00, - 0x50, 0x03, 0x00, 0x00, 0x08, 0x03, 0x00, 0x00, 0xc4, 0x02, 0x00, 0x00, - 0x80, 0x02, 0x00, 0x00, 0x24, 0x02, 0x00, 0x00, 0xcc, 0x01, 0x00, 0x00, - 0x84, 0x01, 0x00, 0x00, 0x40, 0x01, 0x00, 0x00, 0xe8, 0x00, 0x00, 0x00, - 0x8c, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xb2, 0xf0, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0xfc, 0xf0, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x00, 0x38, 0x02, 0x00, 
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0xf2, 0xf0, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x28, 0x00, 0x00, 0x00, 0x3c, 0xf1, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0a, 0xd7, 0x23, 0x3a, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x32, 0xf1, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x40, 0x00, 0x00, 0x00, - 0x24, 0xf1, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0x00, 0x80, 0x37, 0x01, 0x00, 0x00, 0x00, - 0xc2, 0xff, 0x7f, 0x3f, 0x01, 0x00, 0x00, 0x00, 0xd2, 0x6f, 0x75, 0x36, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x8a, 0xf1, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x7c, 0xf1, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x06, 0x16, 0x49, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0x87, 0x19, 0xb1, 0x40, 0x01, 0x00, 0x00, 0x00, 0x58, 0x80, 0xdf, 0xc0, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x3a, 0xf2, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x1f, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0x2c, 0xf2, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x5d, 0xd1, 0xce, 0x39, 0x01, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x7a, 0xf2, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0x6c, 0xf2, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x23, 0x20, 0xb6, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x62, 0xf2, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x54, 0xf2, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0xa2, 0x5a, 0x91, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0x47, 0xc9, 0x90, 0x41, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x00, 0xf4, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, 0xac, 0xf2, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x81, 0xb7, 0xf1, 0x39, - 0x01, 0x00, 0x00, 0x00, 0x9e, 0xb5, 0x71, 0x41, 0x01, 0x00, 0x00, 0x00, - 0x33, 0x20, 0x70, 0xc1, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x04, 0x00, 0x00, 0x6a, 0xf3, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x1d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, - 0x5c, 0xf3, 0xff, 0xff, 0x08, 0x00, 
0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x7a, 0x08, 0x97, 0x35, - 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xaa, 0xf3, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - 0x24, 0x00, 0x00, 0x00, 0x9c, 0xf3, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2f, 0xf5, 0x1f, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0xea, 0xf3, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, 0xdc, 0xf3, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xc7, 0xea, 0x1a, 0x3c, 0x02, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xd2, 0xf3, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, - 0xc4, 0xf3, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0xb2, 0x78, 0x3f, 0x3d, 0x01, 0x00, 0x00, 0x00, 0x39, 0xb9, 0x3e, 0x41, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70, 0xf5, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - 0x3c, 0x00, 0x00, 0x00, 0x1c, 0xf4, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x89, 0x25, 0xf2, 0x39, 0x01, 0x00, 0x00, 0x00, - 0xde, 0xdc, 0x1d, 0x41, 0x01, 0x00, 0x00, 0x00, 0xa5, 0x23, 0x72, 0xc1, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, - 0xda, 0xf4, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0xcc, 0xf4, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x42, 0xe0, 0x90, 0x35, 0x01, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x1a, 0xf5, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x24, 0x00, 0x00, 0x00, - 0x0c, 0xf5, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x1a, 0x2a, 0x19, 0x3b, 0x02, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x5a, 0xf5, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x28, 0x00, 0x00, 0x00, 0x4c, 0xf5, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xe9, 0x36, 0xdd, 0x3b, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x42, 0xf5, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, 0x34, 0xf5, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 
0x00, 0x00, 0x80, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0xdd, 0x43, 0x7e, 0x3d, - 0x01, 0x00, 0x00, 0x00, 0x99, 0x45, 0x7d, 0x41, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0xe0, 0xf6, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, - 0x8c, 0xf5, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x5c, 0xfd, 0xa9, 0x39, 0x01, 0x00, 0x00, 0x00, 0x1e, 0xaa, 0x87, 0x40, - 0x01, 0x00, 0x00, 0x00, 0x08, 0xfc, 0x29, 0xc1, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x4a, 0xf6, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0x3c, 0xf6, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x55, 0xf7, 0x52, 0x35, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x8a, 0xf6, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x24, 0x00, 0x00, 0x00, 0x7c, 0xf6, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xd0, 0xda, 0x1e, 0x3b, 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0xca, 0xf6, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0xbc, 0xf6, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x8e, 0x0b, 0xa8, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xb2, 0xf6, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0xa4, 0xf6, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x12, 0x1c, 0x6e, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0xdd, 0x4a, 0x00, 0x41, 0x01, 0x00, 0x00, 0x00, 0x31, 0xc6, 0xd9, 0xc0, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x62, 0xf7, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0x54, 0xf7, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0x9d, 0x16, 0x39, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0xa2, 0xf7, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0x94, 0xf7, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xa4, 0x34, 0xab, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x8a, 0xf7, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x7c, 0xf7, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 
0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x2e, 0x36, 0xe1, 0x3c, 0x01, 0x00, 0x00, 0x00, - 0xf8, 0x54, 0xe0, 0x40, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x28, 0xf9, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, 0xd4, 0xf7, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xe1, 0xd0, 0xa2, 0x39, - 0x01, 0x00, 0x00, 0x00, 0x9b, 0xcf, 0x22, 0x41, 0x01, 0x00, 0x00, 0x00, - 0xea, 0x23, 0x12, 0xc1, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x02, 0x00, 0x00, 0x92, 0xf8, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, - 0x84, 0xf8, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x99, 0xd3, 0xf7, 0x34, - 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0xd2, 0xf8, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - 0x24, 0x00, 0x00, 0x00, 0xc4, 0xf8, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0xd5, 0xc2, 0x3a, - 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x12, 0xf9, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, 0x04, 0xf9, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x8f, 0x84, 0xa2, 0x3b, 0x02, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xfa, 0xf8, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, - 0xec, 0xf8, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x64, 0xeb, 0x8e, 0x3d, 0x01, 0x00, 0x00, 0x00, 0x3b, 0xf3, 0x17, 0x41, - 0x01, 0x00, 0x00, 0x00, 0xb7, 0xc5, 0x04, 0xc1, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0xaa, 0xf9, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0x9c, 0xf9, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x92, 0xa8, 0x98, 0x39, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xea, 0xf9, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, 0xdc, 0xf9, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x58, 0x76, 0xb9, 0x3b, 0x02, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0xd2, 0xf9, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, - 0xc4, 0xf9, 0xff, 0xff, 0x10, 0x00, 
0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, - 0x43, 0xb8, 0x52, 0x3d, 0x01, 0x00, 0x00, 0x00, 0x8b, 0xe5, 0x51, 0x41, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70, 0xfb, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - 0x3c, 0x00, 0x00, 0x00, 0x1c, 0xfa, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xe3, 0xa1, 0xf0, 0x39, 0x01, 0x00, 0x00, 0x00, - 0x02, 0xa0, 0x70, 0x41, 0x01, 0x00, 0x00, 0x00, 0x87, 0x08, 0x65, 0xc1, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, - 0xda, 0xfa, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0xcc, 0xfa, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xcc, 0x98, 0x41, 0x35, 0x01, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x1a, 0xfb, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x24, 0x00, 0x00, 0x00, - 0x0c, 0xfb, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xed, 0xf5, 0xcd, 0x3a, 0x02, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x5a, 0xfb, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x28, 0x00, 0x00, 0x00, 0x4c, 0xfb, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x9d, 0xca, 0xd4, 0x3b, 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x42, 0xfb, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 0x00, 0x00, 0x34, 0xfb, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x58, 0x58, 0xce, 0x3d, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x49, 0x41, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x06, 0x52, 0xc1, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0xf2, 0xfb, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, - 0xe4, 0xfb, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x9b, 0x9c, 0xe1, 0x39, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x32, 0xfc, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x28, 0x00, 0x00, 0x00, 0x24, 0xfc, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xf8, 0xb6, 0xc3, 0x3b, 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x1a, 0xfc, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x3c, 0x00, 
0x00, 0x00, 0x0c, 0xfc, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00, 0x94, 0x8d, 0x93, 0x3d, - 0x01, 0x00, 0x00, 0x00, 0x06, 0xfa, 0x92, 0x41, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0xb8, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, - 0x64, 0xfc, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x7a, 0xf6, 0x5f, 0x3a, 0x01, 0x00, 0x00, 0x00, 0xba, 0xf4, 0xdf, 0x41, - 0x01, 0x00, 0x00, 0x00, 0xf4, 0x7c, 0xcf, 0xc1, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x22, 0xfd, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0x14, 0xfd, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x46, 0x2f, 0xc4, 0x35, 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x62, 0xfd, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x24, 0x00, 0x00, 0x00, 0x54, 0xfd, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x8f, 0x3f, 0xe0, 0x3a, 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0xa2, 0xfd, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0x94, 0xfd, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x25, 0xd7, 0xa9, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x8a, 0xfd, 0xff, 0xff, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x7c, 0xfd, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0xc4, 0xf4, 0x39, 0x3e, 0x01, 0x00, 0x00, 0x00, - 0xf4, 0x1f, 0xe3, 0x41, 0x01, 0x00, 0x00, 0x00, 0xaa, 0x55, 0x8f, 0xc1, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x3a, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x02, 0x28, 0x00, 0x00, 0x00, 0x2c, 0xfe, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x8b, 0x00, 0x4b, 0x3a, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x7a, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x28, 0x00, 0x00, 0x00, - 0x6c, 0xfe, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xd7, 0xdf, 0xc3, 0x3b, - 0x02, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x62, 0xfe, 0xff, 0xff, 0x0c, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x3c, 0x00, 0x00, 0x00, 0x54, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x01, 0x00, 0x00, 0x00, 0x68, 0xa8, 0x04, 0x3e, 0x01, 0x00, 0x00, 0x00, - 0xc0, 0x23, 0x04, 0x42, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x07, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x3c, 0x00, 0x00, 0x00, - 0xbc, 0xfe, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x1c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x3b, 0xda, 0x75, 0x3b, 0x01, 0x00, 0x00, 0x00, 0x4f, 0xd8, 0xf5, 0x42, - 0x01, 0x00, 0x00, 0x00, 0xa8, 0x2a, 0x61, 0xc2, 0x02, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x7a, 0xff, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x28, 0x00, 0x00, 0x00, 0x6c, 0xff, 0xff, 0xff, 0x08, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xcf, 0x37, 0x69, 0x37, 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0xba, 0xff, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x07, 0x28, 0x00, 0x00, 0x00, 0xac, 0xff, 0xff, 0xff, - 0x08, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x14, 0xd8, 0x72, 0x3b, 0x02, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, - 0x14, 0x00, 0x10, 0x00, 0x0f, 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x09, 0x30, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x0c, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x04, 0x00, 0x0c, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xd4, 0x42, 0x16, 0x3c, 0x02, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, - 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x10, 0x00, 0x0c, 0x00, - 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x4c, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x14, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x08, 0x00, 0x04, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xa8, 0x41, 0x5b, 0x3d, 0x01, 0x00, 0x00, 0x00, - 0x66, 0x66, 0x5a, 0x41, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, - 0x0f, 0x00, 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, - 0xa4, 0x00, 0x00, 0x00, 0x98, 0x00, 0x00, 0x00, 0x8c, 0x00, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x00, 0x74, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00, - 0x5c, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, - 0x38, 0x00, 0x00, 0x00, 0x2c, 0x00, 
0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x0c, 0x00, 0x0b, 0x00, - 0x00, 0x00, 0x04, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x06, 0x96, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x72, - 0x9e, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x19, 0xa6, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x09, 0xae, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, - 0xb6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, 0xbe, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x1b, 0xc6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, - 0xce, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, 0xd6, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x09, 0xde, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x1b, - 0xe6, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0xfa, 0xff, 0xff, 0xff, - 0x00, 0x1b, 0x06, 0x00, 0x06, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x00, 0x09, 0x06, 0x00, 0x08, 0x00, 0x07, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x1b}; - -const unsigned int g_keyword_scrambled_model_data_length = 34520; diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h deleted file mode 100644 index ce34426c..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/benchmarks/keyword_scrambled_model_data.h +++ /dev/null @@ -1,22 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ -#define TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ - -extern const unsigned char g_keyword_scrambled_model_data[]; -extern const unsigned int g_keyword_scrambled_model_data_length; - -#endif // TENSORFLOW_LITE_MICRO_BENCHMARKS_KEYWORD_SCRAMBLED_MODEL_DATA_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/compatibility.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/compatibility.h deleted file mode 100644 index 49acb28f..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/compatibility.h +++ /dev/null @@ -1,32 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ -#define TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ - -// C++ will automatically create class-specific delete operators for virtual -// objects, which by default call the global delete function. For embedded -// applications we want to avoid this, and won't be calling new/delete on these -// objects, so we need to override the default implementation with one that does -// nothing to avoid linking in ::delete(). -// This macro needs to be included in all subclasses of a virtual base class in -// the private section. -#ifdef TF_LITE_STATIC_MEMORY -#define TF_LITE_REMOVE_VIRTUAL_DELETE \ - void operator delete(void* p) {} -#else -#define TF_LITE_REMOVE_VIRTUAL_DELETE -#endif - -#endif // TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/debug_log.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/debug_log.cc deleted file mode 100644 index 46ca253a..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/debug_log.cc +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Reference implementation of the DebugLog() function that's required for a -// platform to support the TensorFlow Lite for Microcontrollers library. This is -// the only function that's absolutely required to be available on a target -// device, since it's used for communicating test results back to the host so -// that we can verify the implementation is working correctly. -// It's designed to be as easy as possible to supply an implementation though. -// On platforms that have a POSIX stack or C library, it can be written as a -// single call to `fprintf(stderr, "%s", s)` to output a string to the error -// stream of the console, but if there's no OS or C library available, there's -// almost always an equivalent way to write out a string to some serial -// interface that can be used instead. For example on Arm M-series MCUs, calling -// the `bkpt #0xAB` assembler instruction will output the string in r1 to -// whatever debug serial connection is available. If you're running mbed, you -// can do the same by creating `Serial pc(USBTX, USBRX)` and then calling -// `pc.printf("%s", s)`. -// To add an equivalent function for your own platform, create your own -// implementation file, and place it in a subfolder with named after the OS -// you're targeting. For example, see the Cortex M bare metal version in -// tensorflow/lite/micro/bluepill/debug_log.cc or the mbed one on -// tensorflow/lite/micro/mbed/debug_log.cc. 
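The comment above is the whole porting contract for logging: DebugLog() is the one function a target must supply, via fprintf() on hosted platforms or a raw serial write on bare metal, and the reference implementation that follows simply forwards to fprintf(stderr, ...). A minimal bare-metal sketch, assuming a hypothetical board-support routine uart_putc() that is not part of TFLite Micro, could look like this:

#include "tensorflow/lite/micro/debug_log.h"

// Hypothetical BSP routine assumed to push one byte out a debug UART;
// substitute whatever serial primitive the target actually provides.
extern "C" void uart_putc(char c);

extern "C" void DebugLog(const char* s) {
  // Forward the string byte by byte; no buffering and no dependency on a C
  // library or an OS, matching the constraints described above.
  while (*s != '\0') {
    uart_putc(*s++);
  }
}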
- -#include "tensorflow/lite/micro/debug_log.h" - -#ifndef TF_LITE_STRIP_ERROR_STRINGS -#include -#endif - -extern "C" void DebugLog(const char* s) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - // Reusing TF_LITE_STRIP_ERROR_STRINGS to disable DebugLog completely to get - // maximum reduction in binary size. This is because we have DebugLog calls - // via TF_LITE_CHECK that are not stubbed out by TF_LITE_REPORT_ERROR. - fprintf(stderr, "%s", s); -#endif -} diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/debug_log.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/debug_log.h deleted file mode 100644 index c2840d0f..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/debug_log.h +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ -#define TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -// This function should be implemented by each target platform, and provide a -// way for strings to be output to some text stream. For more information, see -// tensorflow/lite/micro/debug_log.cc. -void DebugLog(const char* s); - -#ifdef __cplusplus -} // extern "C" -#endif // __cplusplus - -#endif // TENSORFLOW_LITE_MICRO_DEBUG_LOG_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/activation_utils.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/activation_utils.h deleted file mode 100644 index 95ecc26d..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/activation_utils.h +++ /dev/null @@ -1,57 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ - -#include -#include - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/kernels/internal/cppmath.h" -#include "tensorflow/lite/kernels/internal/max.h" -#include "tensorflow/lite/kernels/internal/min.h" - -namespace tflite { -namespace ops { -namespace micro { - -// Returns the floating point value for a fused activation: -inline float ActivationValFloat(TfLiteFusedActivation act, float a) { - switch (act) { - case kTfLiteActNone: - return a; - case kTfLiteActRelu: - return TfLiteMax(0.0f, a); - case kTfLiteActReluN1To1: - return TfLiteMax(-1.0f, TfLiteMin(a, 1.0f)); - case kTfLiteActRelu6: - return TfLiteMax(0.0f, TfLiteMin(a, 6.0f)); - case kTfLiteActTanh: - return std::tanh(a); - case kTfLiteActSignBit: - return std::signbit(a); - case kTfLiteActSigmoid: - return 1.0f / (1.0f + std::exp(-a)); - } - return 0.0f; // To indicate an unsupported activation (i.e. when a new fused - // activation is added to the enum and not handled here). -} - -} // namespace micro -} // namespace ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATION_UTILS_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/activations.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/activations.cc deleted file mode 100644 index a92d5c73..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/activations.cc +++ /dev/null @@ -1,288 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
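ActivationValFloat() above reduces each fused activation to a scalar clamp or transform. A self-contained illustration of the ReLU-family cases, written without the TfLiteFusedActivation enum so it compiles on its own, might be:

#include <algorithm>
#include <cstdio>

// Same clamps as the kTfLiteActRelu / kTfLiteActReluN1To1 / kTfLiteActRelu6
// branches above, expressed on plain floats.
static float Relu(float a) { return std::max(0.0f, a); }
static float ReluN1To1(float a) { return std::max(-1.0f, std::min(a, 1.0f)); }
static float Relu6(float a) { return std::max(0.0f, std::min(a, 6.0f)); }

int main() {
  const float a = 7.5f;
  // Expected output: 7.5 1.0 6.0
  std::printf("%.1f %.1f %.1f\n", Relu(a), ReluN1To1(a), Relu6(a));
  return 0;
}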
-==============================================================================*/ - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace activations { -namespace { - -struct ReluOpData { - ReluParams params; -}; - -struct Relu6OpData { - int8_t six_int8; - int8_t zero_int8; - uint8_t six_uint8; - uint8_t zero_uint8; -}; - -} // namespace - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -template -inline void ReluQuantized(const ReluOpData& data, - const RuntimeShape& input_shape, - const RuntimeShape& output_shape, const T* input_data, - T* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - const int32_t val = static_cast(input_data[i]); - int32_t clamped = - data.params.output_offset + - MultiplyByQuantizedMultiplier(val - data.params.input_offset, - data.params.output_multiplier, - data.params.output_shift); - clamped = std::max(data.params.quantized_activation_min, clamped); - clamped = std::min(data.params.quantized_activation_max, clamped); - output_data[i] = static_cast(clamped); - } -} - -template -inline void CalculateReluOpData(const TfLiteTensor* input, TfLiteTensor* output, - ReluOpData* data) { - float act_min = 0.0; - float act_max = std::numeric_limits::infinity(); - double real_multiplier = - static_cast(input->params.scale / output->params.scale); - - const RuntimeShape input_shape = GetTensorShape(input); - const RuntimeShape output_shape = GetTensorShape(output); - - QuantizeMultiplier(real_multiplier, &data->params.output_multiplier, - &data->params.output_shift); - - data->params.quantized_activation_min = std::max( - static_cast(std::numeric_limits::min()), - output->params.zero_point + - static_cast(roundf(act_min / output->params.scale))); - data->params.quantized_activation_max = - act_max == std::numeric_limits::infinity() - ? static_cast(std::numeric_limits::max()) - : std::min(static_cast(std::numeric_limits::max()), - output->params.zero_point + - static_cast( - roundf(act_max / output->params.scale))); - data->params.input_offset = input->params.zero_point; - data->params.output_offset = output->params.zero_point; -} - -inline void ReluFloat(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - const float val = input_data[i]; - const float lower = 0.0f; - const float clamped = val < lower ? lower : val; - output_data[i] = clamped; - } -} - -inline void Relu6Float(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - const float val = input_data[i]; - const float upper = 6.0f; - const float lower = 0.0f; - const float clamped = val > upper ? upper : val < lower ? 
lower : val; - output_data[i] = clamped; - } -} - -template -inline void Relu6Quantized(Q lower, Q upper, const RuntimeShape& input_shape, - const Q* input_data, - const RuntimeShape& output_shape, Q* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - const Q val = input_data[i]; - const Q clamped = val > upper ? upper : val < lower ? lower : val; - output_data[i] = clamped; - } -} - -void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(ReluOpData)); -} - -TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - ReluOpData* data = static_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - if (input->type == kTfLiteInt8) { - CalculateReluOpData(input, output, data); - } else if (input->type == kTfLiteUInt8) { - CalculateReluOpData(input, output, data); - } - - return kTfLiteOk; -} - -TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const ReluOpData& data = *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (input->type) { - case kTfLiteFloat32: { - ReluFloat(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - - return kTfLiteOk; - } - case kTfLiteInt8: { - ReluQuantized(data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } - case kTfLiteUInt8: { - ReluQuantized(data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } - default: { - TF_LITE_KERNEL_LOG(context, "Only float32 is supported currently, got %s", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - } -} - -void* Relu6Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(Relu6OpData)); -} - -TfLiteStatus Relu6Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - Relu6OpData* data = static_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - - if (input->type == kTfLiteInt8) { - data->six_int8 = FloatToQuantizedType(6.0f, input->params.scale, - input->params.zero_point); - data->zero_int8 = input->params.zero_point; - } else if (input->type == kTfLiteUInt8) { - data->six_uint8 = FloatToQuantizedType(6.0f, input->params.scale, - input->params.zero_point); - data->zero_uint8 = input->params.zero_point; - } - - return kTfLiteOk; -} - -TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const Relu6OpData& data = 
*(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (input->type) { - case kTfLiteFloat32: { - Relu6Float(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - - return kTfLiteOk; - } - case kTfLiteInt8: { - Relu6Quantized(data.zero_int8, data.six_int8, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } - case kTfLiteUInt8: { - Relu6Quantized(data.zero_uint8, data.six_uint8, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } - default: { - TF_LITE_KERNEL_LOG(context, "Only float32 is supported currently, got %s", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - } -} - -} // namespace activations - -TfLiteRegistration Register_RELU() { - return {/*init=*/activations::ReluInit, - /*free=*/nullptr, - /*prepare=*/activations::ReluPrepare, - /*invoke=*/activations::ReluEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_RELU6() { - return {/*init=*/activations::Relu6Init, - /*free=*/nullptr, - /*prepare=*/activations::Relu6Prepare, - /*invoke=*/activations::Relu6Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/add.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/add.cc deleted file mode 100644 index e50d22cd..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/add.cc +++ /dev/null @@ -1,261 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
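Relu6Prepare() above pre-computes the quantized representations of 0 and 6, and Relu6Quantized() then clamps each int8 value between them. A standalone sketch of that clamp, assuming an example quantization of scale 0.1 and zero point -128 (illustrative values only), is:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  const float scale = 0.1f;     // assumed example quantization parameters
  const int zero_point = -128;

  // Roughly what FloatToQuantizedType(6.0f, scale, zero_point) yields above.
  const int8_t six = static_cast<int8_t>(std::lround(6.0f / scale) + zero_point);
  const int8_t zero = static_cast<int8_t>(zero_point);

  const int8_t val = -10;  // dequantizes to (-10 - (-128)) * 0.1 = 11.8
  const int8_t clamped = std::min(std::max(val, zero), six);

  // Expected output: six=-68 zero=-128 clamped=-68 (i.e. 6.0 after dequantizing)
  std::printf("six=%d zero=%d clamped=%d\n", six, zero, clamped);
  return 0;
}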
-==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/add.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/add.h" -#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/memory_helpers.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace add { - -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; - -struct OpData { - bool requires_broadcast; - - // These fields are used in both the general 8-bit -> 8bit quantized path, - // and the special 16-bit -> 16bit quantized path - int input1_shift; - int input2_shift; - int32_t output_activation_min; - int32_t output_activation_max; - - // These fields are used only in the general 8-bit -> 8bit quantized path - int32_t input1_multiplier; - int32_t input2_multiplier; - int32_t output_multiplier; - int output_shift; - int left_shift; - int32_t input1_offset; - int32_t input2_offset; - int32_t output_offset; - - // Used only for float evals: - float output_activation_min_f32; - float output_activation_max_f32; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params, - const TfLiteTensor* input1, - const TfLiteTensor* input2, TfLiteTensor* output, - OpData* data) { - data->requires_broadcast = !HaveSameShapes(input1, input2); - - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - // 8bit -> 8bit general quantized path, with general rescalings - data->input1_offset = -input1->params.zero_point; - data->input2_offset = -input2->params.zero_point; - data->output_offset = output->params.zero_point; - data->left_shift = 20; - const double twice_max_input_scale = - 2 * static_cast( - std::max(input1->params.scale, input2->params.scale)); - const double real_input1_multiplier = - static_cast(input1->params.scale) / twice_max_input_scale; - const double real_input2_multiplier = - static_cast(input2->params.scale) / twice_max_input_scale; - const double real_output_multiplier = - twice_max_input_scale / - ((1 << data->left_shift) * static_cast(output->params.scale)); - - QuantizeMultiplierSmallerThanOneExp( - real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_output_multiplier, &data->output_multiplier, &data->output_shift); - - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->output_activation_min, - &data->output_activation_max)); - } else if (output->type == kTfLiteFloat32) { - CalculateActivationRange(params->activation, - &data->output_activation_min_f32, - &data->output_activation_max_f32); - } - - return kTfLiteOk; -} - -void EvalAdd(TfLiteContext* context, TfLiteNode* node, TfLiteAddParams* params, - const OpData* data, const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { - tflite::ArithmeticParams op_params; - 
SetActivationParams(data->output_activation_min_f32, - data->output_activation_max_f32, &op_params); - if (data->requires_broadcast) { - reference_ops::BroadcastAdd4DSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} - -TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteAddParams* params, const OpData* data, - const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, - TfLiteEvalTensor* output) { - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - tflite::ArithmeticParams op_params; - op_params.left_shift = data->left_shift; - op_params.input1_offset = data->input1_offset; - op_params.input1_multiplier = data->input1_multiplier; - op_params.input1_shift = data->input1_shift; - op_params.input2_offset = data->input2_offset; - op_params.input2_multiplier = data->input2_multiplier; - op_params.input2_shift = data->input2_shift; - op_params.output_offset = data->output_offset; - op_params.output_multiplier = data->output_multiplier; - op_params.output_shift = data->output_shift; - SetActivationParams(data->output_activation_min, - data->output_activation_max, &op_params); - bool need_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); - if (output->type == kTfLiteInt8) { - if (need_broadcast) { - reference_integer_ops::BroadcastAdd4DSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_integer_ops::Add( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } else { - if (need_broadcast) { - reference_ops::BroadcastAdd4DSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } - } - - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); 
- TF_LITE_ENSURE(context, input1 != nullptr); - const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); - TF_LITE_ENSURE(context, input2 != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - OpData* data = static_cast(node->user_data); - auto* params = reinterpret_cast(node->builtin_data); - - TF_LITE_ENSURE_STATUS( - CalculateOpData(context, params, input1, input2, output, data)); - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - if (output->type == kTfLiteFloat32) { - EvalAdd(context, node, params, data, input1, input2, output); - } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data, - input1, input2, output)); - } else { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); - return kTfLiteError; - } - - return kTfLiteOk; -} - -} // namespace add - -TfLiteRegistration Register_ADD() { - return {/*init=*/add::Init, - /*free=*/nullptr, - /*prepare=*/add::Prepare, - /*invoke=*/add::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/arg_min_max.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/arg_min_max.cc deleted file mode 100644 index 12ac0019..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/arg_min_max.cc +++ /dev/null @@ -1,133 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
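CalculateOpData() in the ADD kernel above folds the three tensor scales into fixed-point multipliers with a 20-bit left shift of headroom. The arithmetic it performs, reproduced on assumed example scales (0.5, 0.25 and 0.75 are illustrative values only), is:

#include <algorithm>
#include <cstdio>

int main() {
  const double input1_scale = 0.5;   // assumed example quantization scales
  const double input2_scale = 0.25;
  const double output_scale = 0.75;
  const int left_shift = 20;         // same headroom constant as the kernel

  const double twice_max_input_scale =
      2 * std::max(input1_scale, input2_scale);
  const double real_input1_multiplier = input1_scale / twice_max_input_scale;
  const double real_input2_multiplier = input2_scale / twice_max_input_scale;
  const double real_output_multiplier =
      twice_max_input_scale / ((1 << left_shift) * output_scale);

  // In the kernel each real multiplier is then converted by
  // QuantizeMultiplierSmallerThanOneExp() into an int32 multiplier plus shift.
  std::printf("in1=%f in2=%f out=%.9f\n", real_input1_multiplier,
              real_input2_multiplier, real_output_multiplier);
  return 0;
}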
-==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/arg_min_max.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/micro_utils.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace arg_min_max { - -constexpr int kInputTensor = 0; -constexpr int kAxis = 1; -constexpr int kOutputTensor = 0; - -template -inline void ArgMinMaxHelper(const RuntimeShape& input1_shape, - const T1* input1_data, const T3* input2_data, - const RuntimeShape& output_shape, T2* output_data, - bool is_arg_max) { - if (is_arg_max) { - reference_ops::ArgMinMax(input1_shape, input1_data, input2_data, - output_shape, output_data, micro::Greater()); - } else { - reference_ops::ArgMinMax(input1_shape, input1_data, input2_data, - output_shape, output_data, micro::Less()); - } -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* axis = - tflite::micro::GetEvalInput(context, node, kAxis); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - -#define TF_LITE_ARG_MIN_MAX(data_type, axis_type, output_type) \ - ArgMinMaxHelper(tflite::micro::GetTensorShape(input), \ - tflite::micro::GetTensorData(input), \ - tflite::micro::GetTensorData(axis), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output), \ - is_arg_max) - if (axis->type == kTfLiteInt32) { - if (output->type == kTfLiteInt32) { - switch (input->type) { - case kTfLiteFloat32: - TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t); - break; - case kTfLiteUInt8: - TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t); - break; - case kTfLiteInt8: - TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t); - break; - default: - TF_LITE_KERNEL_LOG(context, - "Only float32, uint8_t and int8_t are " - "supported currently, got %s.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - } else { - TF_LITE_KERNEL_LOG(context, - "Only int32_t are supported currently, got %s.", - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else { - TF_LITE_KERNEL_LOG(context, "Only int32_t are supported currently, got %s.", - TfLiteTypeGetName(axis->type)); - return kTfLiteError; - } - -#undef TF_LITE_ARG_MIN_MAX - - return kTfLiteOk; -} - -TfLiteStatus ArgMinEval(TfLiteContext* context, TfLiteNode* node) { - return Eval(context, node, false); -} - -TfLiteStatus ArgMaxEval(TfLiteContext* context, TfLiteNode* node) { - return Eval(context, node, true); -} - -} // namespace arg_min_max - -TfLiteRegistration Register_ARG_MAX() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/arg_min_max::ArgMaxEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_ARG_MIN() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/arg_min_max::ArgMinEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git 
a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/ceil.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/ceil.cc deleted file mode 100644 index f929ce62..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/ceil.cc +++ /dev/null @@ -1,76 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/ceil.h" - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace ceil { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); - TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type); - TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes); - TF_LITE_ENSURE_EQ(context, output->dims->size, input->dims->size); - for (int i = 0; i < output->dims->size; ++i) { - TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); - } - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - reference_ops::Ceil(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - - return kTfLiteOk; -} -} // namespace ceil - -TfLiteRegistration Register_CEIL() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ceil::Prepare, - /*invoke=*/ceil::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/circular_buffer.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/circular_buffer.cc deleted file mode 100644 index 5ce8dbe1..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/circular_buffer.cc +++ /dev/null @@ -1,177 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
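The CEIL kernel above only checks that the output mirrors the input's float32 shape and then applies reference_ops::Ceil() element-wise. The underlying operation is simply std::ceil over the flat buffer, as this tiny sketch shows:

#include <cmath>
#include <cstdio>

int main() {
  float data[4] = {-1.5f, -0.2f, 0.2f, 1.5f};
  for (float& v : data) {
    v = std::ceil(v);  // element-wise ceiling, as in the reference op
  }
  // Expected output: -1 -0 1 2
  std::printf("%g %g %g %g\n", data[0], data[1], data[2], data[3]);
  return 0;
}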
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -/* - * The circular buffer custom operator is used to implement strided streaming - * convolutions on TFLite Micro. Each time this operator is invoked, it checks - * whether or not to run, based on a predetermined stride in time. If the op - * runs, it inserts the input into the end of the output buffer and shifts the - * output values towards the start of the buffer. It discards the oldest value - * in the output buffer. - * - * Input: [, , , ] - * - * After shifting: - * Output: [, , , ] - * - * We make some assumptions in this custom operator: - * - Input shape must be [1, 1, 1, depth] - * - Output shape must be [1, num_slots, 1, depth] - * - Input and output types must match. - * - Input and output quantization params must be identical. - */ -namespace tflite { -namespace ops { -namespace micro { -namespace circular_buffer { - -namespace { - -// The CircularBuffer op has one input and one output tensor. -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -// TODO(b/149795762): Add this to TfLiteStatus enum. -constexpr int kTfLiteAbort = -9; - -// These fields control the stride period of a strided streaming model. This op -// returns kTfLiteAbort until cycles_until_run-- is zero. At this time, -// cycles_until_run is reset to cycles_max. -struct OpData { - int cycles_until_run; - int cycles_max; -}; - -} // namespace - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* op_data = static_cast(node->user_data); - - TF_LITE_ENSURE(context, input != nullptr); - TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE_EQ(context, input->dims->data[0], output->dims->data[0]); - TF_LITE_ENSURE_EQ(context, 1, input->dims->data[1]); - TF_LITE_ENSURE_EQ(context, input->dims->data[2], output->dims->data[2]); - TF_LITE_ENSURE_EQ(context, output->dims->data[3], input->dims->data[3]); - - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - - // The circular buffer custom operator currently only supports int8. 
- TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8); - - // The last circular buffer layer simply accumulates outputs, and does not run - // periodically. - // TODO(b/150001379): Move this special case logic to the tflite flatbuffer. - static int cb_prepare_count = 0; - cb_prepare_count++; - // These checks specifically work for the only two streaming models supported - // on TFLM. They use the shape of the output tensor along with the layer - // number to determine if the circular buffer period should be 1 or 2. - - // These models are outlined int the following documents: - // https://docs.google.com/document/d/1lc_G2ZFhjiKFo02UHjBaljye1xsL0EkfybkaVELEE3Q/edit?usp=sharing - // https://docs.google.com/document/d/1pGc42PuWyrk-Jy1-9qeqtggvsmHr1ifz8Lmqfpr2rKA/edit?usp=sharing - if (output->dims->data[1] == 5 || output->dims->data[1] == 13 || - (cb_prepare_count == 5 && output->dims->data[2] == 2 && - output->dims->data[3] == 96)) { - op_data->cycles_max = 1; - cb_prepare_count = 0; - } else { - op_data->cycles_max = 2; - } - op_data->cycles_until_run = op_data->cycles_max; - node->user_data = op_data; - - return kTfLiteOk; -} - -// Shifts buffer over by the output depth, and write new input to end of buffer. -// num_slots is the number of samples stored in the output buffer. -// depth is the size of each sample. -void EvalInt8(const int8_t* input, int num_slots, int depth, int8_t* output) { - memmove(output, &output[depth], (num_slots - 1) * depth); - memcpy(&output[(num_slots - 1) * depth], input, depth); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = reinterpret_cast(node->user_data); - - int num_slots = output->dims->data[1]; - int depth = output->dims->data[2] * output->dims->data[3]; - - if (input->type == kTfLiteInt8) { - EvalInt8(tflite::micro::GetTensorData(input), num_slots, depth, - tflite::micro::GetTensorData(output)); - } else { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - - if (--data->cycles_until_run != 0) { - // Signal the interpreter to end current run if the delay before op invoke - // has not been reached. - // TODO(b/149795762): Add kTfLiteAbort to TfLiteStatus enum. - return static_cast(kTfLiteAbort); - } - - data->cycles_until_run = data->cycles_max; - - return kTfLiteOk; -} - -} // namespace circular_buffer - -TfLiteRegistration* Register_CIRCULAR_BUFFER() { - static TfLiteRegistration r = {/*init=*/circular_buffer::Init, - /*free=*/nullptr, - /*prepare=*/circular_buffer::Prepare, - /*invoke=*/circular_buffer::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; - return &r; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/comparisons.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/comparisons.cc deleted file mode 100644 index 35007640..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/comparisons.cc +++ /dev/null @@ -1,724 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
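EvalInt8() above implements the shift described in the circular-buffer comment: drop the oldest slot, slide the remaining slots toward the front, and append the new input at the end. A self-contained sketch with num_slots = 4 and depth = 2 (values chosen purely for illustration) behaves like this:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const int num_slots = 4;  // illustrative buffer geometry
  const int depth = 2;
  int8_t output[num_slots * depth] = {1, 1, 2, 2, 3, 3, 4, 4};  // oldest first
  const int8_t input[depth] = {5, 5};                           // newest sample

  // Same two calls as EvalInt8(): discard the oldest slot, then append.
  std::memmove(output, &output[depth], (num_slots - 1) * depth);
  std::memcpy(&output[(num_slots - 1) * depth], input, depth);

  // Expected output: 2 2 3 3 4 4 5 5
  for (int8_t v : output) {
    std::printf("%d ", v);
  }
  std::printf("\n");
  return 0;
}

The kTfLiteAbort gating in Eval() then decides how often this shift actually runs relative to the model's stride period.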
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/comparisons.h" - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace comparisons { -namespace { - -struct OpData { - ComparisonParams params; -}; - -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; - -TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - RuntimeShape input1_shape = tflite::micro::GetTensorShape(input1); - RuntimeShape input2_shape = tflite::micro::GetTensorShape(input2); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - bool* output_data = tflite::micro::GetTensorData(output); - - bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); - switch (input1->type) { - case kTfLiteBool: - requires_broadcast - ? reference_ops::Broadcast4DSlowEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::EqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteFloat32: - requires_broadcast - ? reference_ops::Broadcast4DSlowEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::EqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt32: - requires_broadcast - ? reference_ops::Broadcast4DSlowEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::EqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt64: - requires_broadcast - ? 
reference_ops::Broadcast4DSlowEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::EqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::EqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::EqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -// TODO(renjieliu): Refactor the logic to avoid duplications. -TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - RuntimeShape input1_shape = tflite::micro::GetTensorShape(input1); - RuntimeShape input2_shape = tflite::micro::GetTensorShape(input2); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - bool* output_data = tflite::micro::GetTensorData(output); - - bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); - switch (input1->type) { - case kTfLiteBool: - requires_broadcast - ? reference_ops::Broadcast4DSlowNotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::NotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteFloat32: - requires_broadcast - ? reference_ops::Broadcast4DSlowNotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::NotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt32: - requires_broadcast - ? 
reference_ops::Broadcast4DSlowNotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::NotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt64: - requires_broadcast - ? reference_ops::Broadcast4DSlowNotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::NotEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowNotEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::NotEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowNotEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::NotEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - RuntimeShape input1_shape = tflite::micro::GetTensorShape(input1); - RuntimeShape input2_shape = tflite::micro::GetTensorShape(input2); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - bool* output_data = tflite::micro::GetTensorData(output); - - bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); - switch (input1->type) { - case kTfLiteFloat32: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt32: - requires_broadcast - ? 
reference_ops::Broadcast4DSlowGreaterNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt64: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - RuntimeShape input1_shape = tflite::micro::GetTensorShape(input1); - RuntimeShape input2_shape = tflite::micro::GetTensorShape(input2); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - bool* output_data = tflite::micro::GetTensorData(output); - - bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); - switch (input1->type) { - case kTfLiteFloat32: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt32: - requires_broadcast - ? 
reference_ops::Broadcast4DSlowGreaterEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt64: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - RuntimeShape input1_shape = tflite::micro::GetTensorShape(input1); - RuntimeShape input2_shape = tflite::micro::GetTensorShape(input2); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - bool* output_data = tflite::micro::GetTensorData(output); - - bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); - switch (input1->type) { - case kTfLiteFloat32: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt32: - requires_broadcast - ? 
reference_ops::Broadcast4DSlowLessNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt64: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - RuntimeShape input1_shape = tflite::micro::GetTensorShape(input1); - RuntimeShape input2_shape = tflite::micro::GetTensorShape(input2); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - bool* output_data = tflite::micro::GetTensorData(output); - - bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); - switch (input1->type) { - case kTfLiteFloat32: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt32: - requires_broadcast - ? 
reference_ops::Broadcast4DSlowLessEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt64: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessEqualNoScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - case kTfLiteInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); - TF_LITE_ENSURE(context, input1 != nullptr); - const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); - TF_LITE_ENSURE(context, input2 != nullptr); - - if (input1->type == kTfLiteUInt8 || input1->type == kTfLiteInt8) { - auto input1_offset = -input1->params.zero_point; - auto input2_offset = -input2->params.zero_point; - const int kLeftShift = 8; - - int32_t input1_multiplier; - int input1_shift; - QuantizeMultiplierSmallerThanOneExp( - static_cast(input1->params.scale), &input1_multiplier, - &input1_shift); - int32_t input2_multiplier; - int input2_shift; - QuantizeMultiplierSmallerThanOneExp( - static_cast(input2->params.scale), &input2_multiplier, - &input2_shift); - - data->params.left_shift = kLeftShift; - data->params.input1_offset = input1_offset; - data->params.input1_multiplier = input1_multiplier; - data->params.input1_shift = input1_shift; - data->params.input2_offset = input2_offset; - data->params.input2_multiplier = input2_multiplier; - data->params.input2_shift = input2_shift; - } - - return kTfLiteOk; -} - -} // namespace comparisons - -TfLiteRegistration Register_EQUAL() { - return 
{/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::EqualEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_NOT_EQUAL() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::NotEqualEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_GREATER() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::GreaterEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_GREATER_EQUAL() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::GreaterEqualEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_LESS() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::LessEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_LESS_EQUAL() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::LessEqualEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/concatenation.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/concatenation.cc deleted file mode 100644 index 8127cc32..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/concatenation.cc +++ /dev/null @@ -1,276 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/concatenation.h" - -#include - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/portable_tensor.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace concatenation { - -constexpr int kMaxInputNum = 10; // Maximum number of input tensors -constexpr int kOutputTensor = 0; - -struct OpData { - ConcatenationParams params; -}; - -// Handles negative axis index, coerces to positive index value. 
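// Illustrative sketch (not part of the deleted file): the axis coercion that
// the comment above describes counts backwards from the number of dimensions,
// so a negative axis is resolved relative to the output rank.
inline int PositiveAxis(int axis, int num_dims) {
  return axis >= 0 ? axis : num_dims + axis;
}
// Example: PositiveAxis(-1, 4) == 3 and PositiveAxis(2, 4) == 2.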
-inline int CalculatePositiveAxis(int axis, const TfLiteTensor* output_tensor) { - if (axis >= 0) { - return axis; - } else { - return NumDimensions(output_tensor) + axis; - } -} - -// The following functions are helpers to get tensor data in the format that the -// reference op implementation expects. They provide the same functionality as -// class VectorOfTensors and class VectorOfQuantizedTensors in TFLite. - -// Gets shapes from a list of tensors. -inline void GetAllInputTensorShapes(const TfLiteContext* context, - const TfLiteNode* node, - RuntimeShape all_shapes[kMaxInputNum]) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(node != nullptr); - for (int i = 0; i < node->inputs->size; ++i) { - const TfLiteEvalTensor* t = tflite::micro::GetEvalInput(context, node, i); - RuntimeShape shape = tflite::micro::GetTensorShape(t); - all_shapes[i].ReplaceWith(shape.DimensionsCount(), shape.DimsData()); - } -} - -// Get shape pointers from a list of shapes. -inline void GetShapesPointers(const RuntimeShape* shapes, size_t num, - const RuntimeShape* pointers[]) { - for (size_t i = 0; i < num; ++i) { - pointers[i] = &shapes[i]; - } -} - -// Gets data pointers from a list of tensors. -template -inline void GetAllInputTensorData(const TfLiteContext* context, - const TfLiteNode* node, - T* all_data[kMaxInputNum]) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(node != nullptr); - for (int i = 0; i < node->inputs->size; ++i) { - const TfLiteEvalTensor* t = tflite::micro::GetEvalInput(context, node, i); - all_data[i] = tflite::micro::GetTensorData(t); - } -} - -template -void EvalUnquantized(TfLiteContext* context, TfLiteNode* node) { - // Collect the shapes and data pointer of input tensors - RuntimeShape inputs_shape[kMaxInputNum]; - const RuntimeShape* inputs_shape_ptr[kMaxInputNum]; - const data_type* inputs_data[kMaxInputNum]; - GetAllInputTensorShapes(context, node, inputs_shape); - GetShapesPointers(inputs_shape, node->inputs->size, inputs_shape_ptr); - GetAllInputTensorData(context, node, inputs_data); - - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - reference_ops::Concatenation(data->params, inputs_shape_ptr, inputs_data, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void EvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node) { - // Collect the shapes and data pointer of input tensors - RuntimeShape inputs_shape[kMaxInputNum]; - const RuntimeShape* inputs_shape_ptr[kMaxInputNum]; - const uint8_t* inputs_data[kMaxInputNum]; - GetAllInputTensorShapes(context, node, inputs_shape); - GetShapesPointers(inputs_shape, node->inputs->size, inputs_shape_ptr); - GetAllInputTensorData(context, node, inputs_data); - - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - reference_ops::ConcatenationWithScaling( - data->params, inputs_shape_ptr, inputs_data, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - // This function only checks the types. 
Additional shape validations are - // performed in the reference implementation called during Eval(). - const TfLiteConcatenationParams* params = - reinterpret_cast(node->builtin_data); - - const TfLiteTensor* input_tensor = GetInput(context, node, 0); - TF_LITE_ENSURE(context, input_tensor != nullptr); - TfLiteType input_type = input_tensor->type; - const TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output_tensor != nullptr); - TfLiteType output_type = output_tensor->type; - - // Check activation and input type - TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone); - TF_LITE_ENSURE(context, - input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 || - input_type == kTfLiteInt8 || input_type == kTfLiteInt32 || - input_type == kTfLiteInt64); - - // Output type must match input type - TF_LITE_ENSURE_EQ(context, output_type, input_type); - - // This implementation does not support large number of input tensors - const int num_inputs = NumInputs(node); - TF_LITE_ENSURE(context, num_inputs <= kMaxInputNum); - - // Shapes with dimensions >4 are not yet supported with static allocation. - for (int i = 0; i < num_inputs; ++i) { - const TfLiteTensor* input = GetInput(context, node, i); - TF_LITE_ENSURE(context, input != nullptr); - int num_dimensions = NumDimensions(input); - - if (num_dimensions > 4) { - TF_LITE_KERNEL_LOG( - context, - "Op Concatenation does not currently support num dimensions >4 " - "Tensor has %d dimensions.", - num_dimensions); - return kTfLiteError; - } - } - - // Calculate OpData. - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - switch (output_type) { // Already know in/outtypes are same. - case kTfLiteFloat32: - case kTfLiteInt32: - case kTfLiteInt64: { - data->params.axis = CalculatePositiveAxis(params->axis, output); - data->params.inputs_count = node->inputs->size; - break; - } - case kTfLiteUInt8: - case kTfLiteInt8: { - data->params.axis = CalculatePositiveAxis(params->axis, output); - data->params.inputs_count = node->inputs->size; - - float* input_scales = - reinterpret_cast(context->AllocatePersistentBuffer( - context, node->inputs->size * sizeof(float))); - - int32_t* input_zero_points = - reinterpret_cast(context->AllocatePersistentBuffer( - context, node->inputs->size * sizeof(int32_t))); - - // Allocate persistent scale and zeropoint buffers. - // Store input scale and zero point values in OpParams: - for (int i = 0; i < node->inputs->size; ++i) { - const TfLiteTensor* t = GetInput(context, node, i); - TF_LITE_ENSURE(context, t != nullptr); - input_scales[i] = t->params.scale; - input_zero_points[i] = t->params.zero_point; - } - - data->params.input_scale = input_scales; - data->params.input_zeropoint = input_zero_points; - data->params.output_zeropoint = output->params.zero_point; - data->params.output_scale = output->params.scale; - break; - } - default: - TF_LITE_KERNEL_LOG( - context, "Op Concatenation does not currently support Type '%s'.", - TfLiteTypeGetName(output_type)); - return kTfLiteError; - } - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output_tensor != nullptr); - TfLiteType output_type = output_tensor->type; - - switch (output_type) { // Already know in/outtypes are same. 
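// Illustrative sketch (an assumption about what ConcatenationWithScaling does
// with the input_scale/input_zeropoint arrays stored above; not code from this
// diff): each copied element is remapped from its input's quantization domain
// into the output's before being written.
#include <algorithm>
#include <cmath>
#include <cstdint>

inline uint8_t RequantizeForConcat(uint8_t in_q, float in_scale,
                                   int32_t in_zero_point, float out_scale,
                                   int32_t out_zero_point) {
  // Real value represented by the input element.
  const float real = in_scale * (static_cast<int32_t>(in_q) - in_zero_point);
  // Re-express it with the output's scale and zero point, then clamp to uint8.
  const int32_t out_q =
      out_zero_point + static_cast<int32_t>(std::lround(real / out_scale));
  return static_cast<uint8_t>(std::min(255, std::max(0, out_q)));
}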
- case kTfLiteFloat32: - EvalUnquantized(context, node); - break; - case kTfLiteInt32: - EvalUnquantized(context, node); - break; - case kTfLiteUInt8: - EvalQuantizedUInt8(context, node); - break; - case kTfLiteInt8: - EvalUnquantized(context, node); - break; - case kTfLiteInt64: - EvalUnquantized(context, node); - break; - - default: - TF_LITE_KERNEL_LOG( - context, "Op Concatenation does not currently support Type '%s'.", - TfLiteTypeGetName(output_type)); - return kTfLiteError; - } - - return kTfLiteOk; -} - -} // namespace concatenation - -TfLiteRegistration Register_CONCATENATION() { - return {/*init=*/concatenation::Init, - /*free=*/nullptr, - /*prepare=*/concatenation::Prepare, - /*invoke=*/concatenation::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/conv.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/conv.cc deleted file mode 100644 index dc821df5..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/conv.cc +++ /dev/null @@ -1,340 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/conv.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/conv.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/padding.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -constexpr int kInputTensor = 0; -constexpr int kFilterTensor = 1; -constexpr int kBiasTensor = 2; -constexpr int kOutputTensor = 0; - -// Conv is quantized along dimension 0: -// https://www.tensorflow.org/lite/performance/quantization_spec -constexpr int kConvQuantizedDimension = 0; - -// This file has 2 implementation of Conv. - -struct OpData { - TfLitePaddingValues padding; - - // Cached tensor zero point values for quantized operations. - int32_t input_zero_point; - int32_t filter_zero_point; - int32_t output_zero_point; - - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - - // Per channel output multiplier and shift. - int32_t* per_channel_output_multiplier; - int32_t* per_channel_output_shift; - - // The range of the fused activation layer. For example for kNone and - // uint8_t these would be 0 and 255. 
- int32_t output_activation_min; - int32_t output_activation_max; -}; - -inline PaddingType RuntimePaddingType(TfLitePadding padding) { - switch (padding) { - case TfLitePadding::kTfLitePaddingSame: - return PaddingType::kSame; - case TfLitePadding::kTfLitePaddingValid: - return PaddingType::kValid; - case TfLitePadding::kTfLitePaddingUnknown: - default: - return PaddingType::kNone; - } -} - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - const TfLiteConvParams* params, int width, - int height, int filter_width, int filter_height, - int out_width, int out_height, - const TfLiteType data_type, OpData* data) { - bool has_bias = node->inputs->size == 3; - // Check number of inputs/outputs - TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - - // Matching GetWindowedOutputSize in TensorFlow. - auto padding = params->padding; - data->padding = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, - params->dilation_height_factor, params->dilation_width_factor, height, - width, filter_height, filter_width, padding, &out_height, &out_width); - - // Note that quantized inference requires that all tensors have their - // parameters set. This is usually done during quantized training. - if (data_type != kTfLiteFloat32) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - TF_LITE_ENSURE(context, filter != nullptr); - const TfLiteTensor* bias = - GetOptionalInputTensor(context, node, kBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - int output_channels = filter->dims->data[kConvQuantizedDimension]; - - TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( - context, input, filter, bias, output, params->activation, - &data->output_multiplier, &data->output_shift, - &data->output_activation_min, &data->output_activation_max, - data->per_channel_output_multiplier, - reinterpret_cast(data->per_channel_output_shift), - output_channels)); - } - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - const auto params = static_cast(node->builtin_data); - - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - TF_LITE_ENSURE(context, filter != nullptr); - - int input_width = input->dims->data[2]; - int input_height = input->dims->data[1]; - int filter_width = filter->dims->data[2]; - int filter_height = filter->dims->data[1]; - int output_width = output->dims->data[2]; - int output_height = output->dims->data[1]; - - // Dynamically allocate per-channel quantization parameters. 
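// Illustrative sketch (an assumption about how the per-channel buffers
// allocated below are filled, following the usual TFLite scheme; not code from
// this diff): each output channel's real multiplier is
// input_scale * filter_scale[c] / output_scale, stored as a Q31 fixed-point
// multiplier plus a power-of-two exponent.
#include <cmath>
#include <cstdint>

inline void DecomposeMultiplier(double real_multiplier,
                                int32_t* quantized_multiplier, int* shift) {
  if (real_multiplier == 0.0) {
    *quantized_multiplier = 0;
    *shift = 0;
    return;
  }
  // real_multiplier == significand * 2^shift, with significand in [0.5, 1).
  const double significand = std::frexp(real_multiplier, shift);
  int64_t q = static_cast<int64_t>(std::round(significand * (1ll << 31)));
  if (q == (1ll << 31)) {  // Rounding pushed the significand up to 1.0.
    q /= 2;
    ++(*shift);
  }
  *quantized_multiplier = static_cast<int32_t>(q);
}

inline void FillPerChannelParams(float input_scale, const float* filter_scales,
                                 float output_scale, int num_channels,
                                 int32_t* multipliers, int32_t* shifts) {
  for (int c = 0; c < num_channels; ++c) {
    int shift = 0;
    const double real_multiplier =
        static_cast<double>(input_scale) * filter_scales[c] / output_scale;
    DecomposeMultiplier(real_multiplier, &multipliers[c], &shift);
    shifts[c] = shift;
  }
}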
- const int num_channels = filter->dims->data[kConvQuantizedDimension]; - data->per_channel_output_multiplier = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - data->per_channel_output_shift = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - - // All per-channel quantized tensors need valid zero point and scale arrays. - if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, filter->quantization.type, - kTfLiteAffineQuantization); - - const auto* affine_quantization = - static_cast(filter->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - TF_LITE_ENSURE(context, affine_quantization->zero_point); - - TF_LITE_ENSURE(context, - affine_quantization->scale->size == 1 || - affine_quantization->scale->size == - filter->dims->data[kConvQuantizedDimension]); - TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, - affine_quantization->zero_point->size); - } - - TF_LITE_ENSURE_STATUS(CalculateOpData( - context, node, params, input_width, input_height, filter_width, - filter_height, output_width, output_height, input->type, data)); - - data->input_zero_point = input->params.zero_point; - data->filter_zero_point = filter->params.zero_point; - data->output_zero_point = output->params.zero_point; - - return kTfLiteOk; -} // namespace conv - -void EvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, - TfLiteEvalTensor* im2col, TfLiteEvalTensor* hwcn_weights, - TfLiteEvalTensor* output) { - const int32_t input_offset = -data.input_zero_point; - const int32_t filter_offset = -data.filter_zero_point; - const int32_t output_offset = data.output_zero_point; - - // TODO(b/154032858): Investigate removing extra copies. - ConvParams op_params; - op_params.padding_type = RuntimePaddingType(params->padding); - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.input_offset = input_offset; - op_params.weights_offset = filter_offset; - op_params.output_offset = output_offset; - op_params.output_multiplier = data.output_multiplier; - op_params.output_shift = -data.output_shift; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - reference_ops::Conv(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(im2col), - tflite::micro::GetTensorData(im2col), nullptr); -} - -void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output, - TfLiteEvalTensor* im2col) { - // TODO(b/154032858): Investigate removing extra copies. 
- ConvParams op_params; - op_params.input_offset = -data.input_zero_point; - op_params.output_offset = data.output_zero_point; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - - reference_integer_ops::ConvPerChannel( - op_params, data.per_channel_output_multiplier, - data.per_channel_output_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, TfLiteEvalTensor* im2col, - TfLiteEvalTensor* hwcn_weights, TfLiteEvalTensor* output) { - float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, - &output_activation_max); - // TODO(b/154032858): Investigate removing extra copies. - ConvParams op_params; - op_params.padding_type = RuntimePaddingType(params->padding); - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - - reference_ops::Conv(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(im2col), - tflite::micro::GetTensorData(im2col)); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFilterTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 3) - ? tflite::micro::GetEvalInput(context, node, kBiasTensor) - : nullptr; - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - TF_LITE_ENSURE_EQ(context, input->type, output->type); - TF_LITE_ENSURE_MSG(context, input->type == filter->type, - "Hybrid models are not supported on TFLite Micro."); - - switch (input->type) { // Already know in/out types are same. 
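// Illustrative sketch (an assumption about CalculateActivationRange, on which
// the deleted EvalFloat above relies; not code from this diff): a fused
// activation only narrows the clamping range applied to each output value.
#include <limits>

enum class FusedActivation { kNone, kRelu, kRelu6 };

inline void ActivationRange(FusedActivation activation, float* range_min,
                            float* range_max) {
  *range_min = -std::numeric_limits<float>::infinity();
  *range_max = std::numeric_limits<float>::infinity();
  if (activation == FusedActivation::kRelu) {
    *range_min = 0.0f;
  } else if (activation == FusedActivation::kRelu6) {
    *range_min = 0.0f;
    *range_max = 6.0f;
  }
}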
- case kTfLiteFloat32: - EvalFloat(context, node, params, data, input, filter, bias, nullptr, - nullptr, output); - break; - case kTfLiteInt8: - EvalQuantizedPerChannel(context, node, params, data, input, filter, bias, - output, nullptr); - break; - case kTfLiteUInt8: - EvalQuantized(context, node, params, data, input, filter, bias, nullptr, - nullptr, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_CONV_2D() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/conv_test.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/conv_test.h deleted file mode 100644 index ef04307c..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/conv_test.h +++ /dev/null @@ -1,94 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/micro/kernels/kernel_runner.h" -#include "tensorflow/lite/micro/kernels/micro_ops.h" -#include "tensorflow/lite/micro/test_helpers.h" -#include "tensorflow/lite/micro/testing/micro_test.h" - -namespace tflite { -namespace testing { - -TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size, - int output_length, TfLiteConvParams* conv_params, - TfLiteRegistration registration, float* output_data); - -TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size, - int output_length, TfLiteConvParams* conv_params, - TfLiteRegistration registration, int8_t* output_data); - -TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size, - int output_length, TfLiteConvParams* conv_params, - TfLiteRegistration registration, uint8_t* output_data); - -TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size, - const float* expected_output_data, - int output_length, - TfLiteConvParams* conv_params, - TfLiteRegistration registration, - float* output_data, float tolerance = 1e-5); - -TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size, - const int8_t* expected_output_data, - int output_length, - TfLiteConvParams* conv_params, - TfLiteRegistration registration, - int8_t* output_data, float tolerance = 1e-5); - -TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size, - const uint8_t* expected_output_data, - int output_length, - TfLiteConvParams* conv_params, - TfLiteRegistration registration, - uint8_t* output_data, float tolerance = 1e-5); - -void TestConvFloat(const int* input_dims_data, const float* input_data, - const int* filter_dims_data, const float* filter_data, - const int* bias_dims_data, const float* bias_data, - const int* output_dims_data, - const float* expected_output_data, - TfLiteConvParams* conv_params, - TfLiteRegistration registration, float* output_data); - -void TestConvQuantizedPerLayer( - const int* input_dims_data, const float* input_data, - uint8_t* input_quantized, float input_scale, const int* filter_dims_data, - const float* filter_data, uint8_t* filter_quantized, float filter_scale, - const int* bias_dims_data, const float* bias_data, int32_t* bias_quantized, - const int* output_dims_data, const float* expected_output_data, - uint8_t* expected_output_quantized, float output_scale, - TfLiteConvParams* conv_params, TfLiteRegistration registration, - uint8_t* output_data); - -void TestConvQuantizedPerChannel( - const int* input_dims_data, const float* input_data, - int8_t* input_quantized, float input_scale, int input_zero_point, - const int* filter_dims_data, const float* filter_data, - int8_t* filter_data_quantized, const int* bias_dims_data, - const float* bias_data, int32_t* bias_data_quantized, float* bias_scales, - int* bias_zero_points, const int* output_dims_data, - const float* expected_output_data, int8_t* expected_output_data_quantized, - float output_scale, int output_zero_point, TfLiteConvParams* conv_params, - TfLiteRegistration registration, int8_t* output_data); - -} // namespace testing -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/conv_test_common.cc 
b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/conv_test_common.cc deleted file mode 100644 index 9b81e196..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/conv_test_common.cc +++ /dev/null @@ -1,252 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/kernels/conv_test.h" - -namespace tflite { -namespace testing { - -template -TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size, - int output_length, TfLiteConvParams* conv_params, - TfLiteRegistration registration, T* output_data) { - int inputs_array_data[] = {3, 0, 1, 2}; - TfLiteIntArray* inputs_array = IntArrayFromInts(inputs_array_data); - int outputs_array_data[] = {1, 3}; - TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data); - - micro::KernelRunner runner( - registration, tensors, tensors_size, inputs_array, outputs_array, - reinterpret_cast(conv_params), micro_test::reporter); - - const char* init_data = reinterpret_cast(conv_params); - TfLiteStatus status = runner.InitAndPrepare(init_data); - if (status != kTfLiteOk) { - return status; - } - return runner.Invoke(); -} - -template -TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size, - const T* expected_output_data, - int output_length, - TfLiteConvParams* conv_params, - TfLiteRegistration registration, - T* output_data, float tolerance) { - TfLiteStatus status = InvokeConv(tensors, tensors_size, output_length, - conv_params, registration, output_data); - if (status != kTfLiteOk) { - return status; - } - for (int i = 0; i < output_length; ++i) { - TF_LITE_MICRO_EXPECT_NEAR(expected_output_data[i], output_data[i], - tolerance); - } - return kTfLiteOk; -} - -TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size, - int output_length, TfLiteConvParams* conv_params, - TfLiteRegistration registration, float* output_data) { - return InvokeConv(tensors, tensors_size, output_length, conv_params, - registration, output_data); -} - -TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size, - int output_length, TfLiteConvParams* conv_params, - TfLiteRegistration registration, int8_t* output_data) { - return InvokeConv(tensors, tensors_size, output_length, conv_params, - registration, output_data); -} - -TfLiteStatus InvokeConv(TfLiteTensor* tensors, int tensors_size, - int output_length, TfLiteConvParams* conv_params, - TfLiteRegistration registration, uint8_t* output_data) { - return InvokeConv(tensors, tensors_size, output_length, conv_params, - registration, output_data); -} - -TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size, - const float* expected_output_data, - int output_length, - TfLiteConvParams* conv_params, - TfLiteRegistration registration, - float* output_data, float tolerance) { - return ValidateConvGoldens(tensors, tensors_size, 
expected_output_data, - output_length, conv_params, registration, - output_data, tolerance); -} - -TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size, - const int8_t* expected_output_data, - int output_length, - TfLiteConvParams* conv_params, - TfLiteRegistration registration, - int8_t* output_data, float tolerance) { - return ValidateConvGoldens( - tensors, tensors_size, expected_output_data, output_length, conv_params, - registration, output_data, tolerance); -} - -TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size, - const uint8_t* expected_output_data, - int output_length, - TfLiteConvParams* conv_params, - TfLiteRegistration registration, - uint8_t* output_data, float tolerance) { - return ValidateConvGoldens( - tensors, tensors_size, expected_output_data, output_length, conv_params, - registration, output_data, tolerance); -} - -void TestConvFloat(const int* input_dims_data, const float* input_data, - const int* filter_dims_data, const float* filter_data, - const int* bias_dims_data, const float* bias_data, - const int* output_dims_data, - const float* expected_output_data, - TfLiteConvParams* conv_params, - TfLiteRegistration registration, float* output_data) { - TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data); - TfLiteIntArray* filter_dims = IntArrayFromInts(filter_dims_data); - TfLiteIntArray* bias_dims = IntArrayFromInts(bias_dims_data); - TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data); - const int output_dims_count = ElementCount(*output_dims); - constexpr int inputs_size = 3; - constexpr int outputs_size = 1; - constexpr int tensors_size = inputs_size + outputs_size; - TfLiteTensor tensors[tensors_size] = { - CreateTensor(input_data, input_dims), - CreateTensor(filter_data, filter_dims), - CreateTensor(bias_data, bias_dims), - CreateTensor(output_data, output_dims), - }; - - TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, ValidateConvGoldens(tensors, tensors_size, - expected_output_data, output_dims_count, - conv_params, registration, output_data)); -} - -void TestConvQuantizedPerLayer( - const int* input_dims_data, const float* input_data, - uint8_t* input_quantized, float input_scale, const int* filter_dims_data, - const float* filter_data, uint8_t* filter_quantized, float filter_scale, - const int* bias_dims_data, const float* bias_data, int32_t* bias_quantized, - const int* output_dims_data, const float* expected_output_data, - uint8_t* expected_output_quantized, float output_scale, - TfLiteConvParams* conv_params, TfLiteRegistration registration, - uint8_t* output_data) { - TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data); - TfLiteIntArray* filter_dims = IntArrayFromInts(filter_dims_data); - TfLiteIntArray* bias_dims = IntArrayFromInts(bias_dims_data); - TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data); - const int output_dims_count = ElementCount(*output_dims); - - tflite::Quantize(expected_output_data, expected_output_quantized, - output_dims_count, output_scale, 128); - - constexpr int inputs_size = 3; - constexpr int outputs_size = 1; - constexpr int tensors_size = inputs_size + outputs_size; - TfLiteTensor tensors[tensors_size] = { - CreateQuantizedTensor(input_data, input_quantized, input_dims, - input_scale, 128), - CreateQuantizedTensor(filter_data, filter_quantized, filter_dims, - filter_scale, 128), - CreateQuantizedBiasTensor(bias_data, bias_quantized, bias_dims, - input_scale, filter_scale), - CreateQuantizedTensor(output_data, output_dims, output_scale, 128)}; - - 
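// Illustrative sketch (an assumption about the tflite::Quantize helper used by
// the deleted test above; not code from this diff): affine quantization maps a
// real value to round(x / scale) + zero_point, clamped to the storage type's
// range (here uint8 with a zero point of 128).
#include <algorithm>
#include <cmath>
#include <cstdint>

inline uint8_t QuantizeAffine(float x, float scale, int32_t zero_point) {
  const int32_t q = static_cast<int32_t>(std::lround(x / scale)) + zero_point;
  return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}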
float filter_scales[] = {1, filter_scale}; - int filter_zero_points[] = {1, 128}; - TfLiteAffineQuantization filter_quant = {FloatArrayFromFloats(filter_scales), - IntArrayFromInts(filter_zero_points), - 0}; - tensors[1].quantization = {kTfLiteAffineQuantization, &filter_quant}; - - TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, - ValidateConvGoldens(tensors, tensors_size, expected_output_quantized, - output_dims_count, conv_params, registration, - output_data)); -} - -void TestConvQuantizedPerChannel( - const int* input_dims_data, const float* input_data, - int8_t* input_quantized, float input_scale, int input_zero_point, - const int* filter_dims_data, const float* filter_data, - int8_t* filter_data_quantized, const int* bias_dims_data, - const float* bias_data, int32_t* bias_data_quantized, float* bias_scales, - int* bias_zero_points, const int* output_dims_data, - const float* expected_output_data, int8_t* expected_output_data_quantized, - float output_scale, int output_zero_point, TfLiteConvParams* conv_params, - TfLiteRegistration registration, int8_t* output_data) { - TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data); - TfLiteIntArray* filter_dims = IntArrayFromInts(filter_dims_data); - TfLiteIntArray* bias_dims = IntArrayFromInts(bias_dims_data); - TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data); - const int output_dims_count = ElementCount(*output_dims); - - int filter_zero_points[5]; - float filter_scales[5]; - TfLiteAffineQuantization filter_quant; - TfLiteAffineQuantization bias_quant; - TfLiteTensor input_tensor = CreateQuantizedTensor( - input_data, input_quantized, input_dims, input_scale, input_zero_point); - TfLiteTensor filter_tensor = CreateSymmetricPerChannelQuantizedTensor( - filter_data, filter_data_quantized, filter_dims, filter_scales, - filter_zero_points, &filter_quant, 0 /* quantized dimension */); - TfLiteTensor bias_tensor = CreatePerChannelQuantizedBiasTensor( - bias_data, bias_data_quantized, bias_dims, input_scale, &filter_scales[1], - bias_scales, bias_zero_points, &bias_quant, 0 /* quantized dimension */); - TfLiteTensor output_tensor = CreateQuantizedTensor( - output_data, output_dims, output_scale, output_zero_point); - - float input_scales[] = {1, input_scale}; - int input_zero_points[] = {1, input_zero_point}; - TfLiteAffineQuantization input_quant = {FloatArrayFromFloats(input_scales), - IntArrayFromInts(input_zero_points), - 0}; - input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant}; - - float output_scales[] = {1, output_scale}; - int output_zero_points[] = {1, output_zero_point}; - TfLiteAffineQuantization output_quant = {FloatArrayFromFloats(output_scales), - IntArrayFromInts(output_zero_points), - 0}; - output_tensor.quantization = {kTfLiteAffineQuantization, &output_quant}; - - constexpr int inputs_size = 3; - constexpr int outputs_size = 1; - constexpr int tensors_size = inputs_size + outputs_size; - TfLiteTensor tensors[tensors_size] = { - input_tensor, - filter_tensor, - bias_tensor, - output_tensor, - }; - - tflite::Quantize(expected_output_data, expected_output_data_quantized, - output_dims_count, output_scale, output_zero_point); - TF_LITE_MICRO_EXPECT_EQ( - kTfLiteOk, - ValidateConvGoldens(tensors, tensors_size, expected_output_data_quantized, - output_dims_count, conv_params, registration, - output_data, 1.0 /* tolerance */)); -} - -} // namespace testing -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/depthwise_conv.cc 
b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/depthwise_conv.cc deleted file mode 100644 index 08e969b1..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/depthwise_conv.cc +++ /dev/null @@ -1,326 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" -#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/padding.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -constexpr int kInputTensor = 0; -constexpr int kFilterTensor = 1; -constexpr int kBiasTensor = 2; -constexpr int kOutputTensor = 0; - -// Depthwise conv is quantized along dimension 3: -// https://www.tensorflow.org/lite/performance/quantization_spec -constexpr int kDepthwiseConvQuantizedDimension = 3; - -struct OpData { - TfLitePaddingValues padding; - - // Cached tensor zero point values for quantized operations. - int32_t input_zero_point; - int32_t filter_zero_point; - int32_t output_zero_point; - - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - - // Per channel output multiplier and shift. - int32_t* per_channel_output_multiplier; - int32_t* per_channel_output_shift; - // The range of the fused activation layer. For example for kNone and - // uint8_t these would be 0 and 255. - int32_t output_activation_min; - int32_t output_activation_max; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, int width, - int height, int filter_width, int filter_height, - const TfLiteType data_type, OpData* data) { - bool has_bias = node->inputs->size == 3; - // Check number of inputs/outputs - TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - - int unused_output_height, unused_output_width; - data->padding = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, 1, 1, height, width, - filter_height, filter_width, params->padding, &unused_output_height, - &unused_output_width); - - // Note that quantized inference requires that all tensors have their - // parameters set. This is usually done during quantized training. 
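// Illustrative sketch (an assumption about the SAME-padding arithmetic behind
// the ComputePaddingHeightWidth call a few lines above; not code from this
// diff): for one dimension, SAME padding keeps ceil(in / stride) outputs and
// splits the leftover evenly, with the smaller half placed before the data.
#include <algorithm>

inline int SamePaddingBefore(int in_size, int stride, int filter_size,
                             int dilation, int* out_size) {
  const int effective_filter = (filter_size - 1) * dilation + 1;
  *out_size = (in_size + stride - 1) / stride;  // ceil(in_size / stride)
  const int total_padding =
      std::max(0, (*out_size - 1) * stride + effective_filter - in_size);
  return total_padding / 2;  // padding applied before the data
}
// Example: in_size 5, stride 2, 3x3 filter, dilation 1 -> out_size 3, pad 1.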
- if (data_type != kTfLiteFloat32) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - TF_LITE_ENSURE(context, filter != nullptr); - const TfLiteTensor* bias = - GetOptionalInputTensor(context, node, kBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; - - return tflite::PopulateConvolutionQuantizationParams( - context, input, filter, bias, output, params->activation, - &data->output_multiplier, &data->output_shift, - &data->output_activation_min, &data->output_activation_max, - data->per_channel_output_multiplier, - reinterpret_cast(data->per_channel_output_shift), num_channels); - } - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - auto* params = - reinterpret_cast(node->builtin_data); - OpData* data = static_cast(node->user_data); - - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - TF_LITE_ENSURE(context, filter != nullptr); - - const TfLiteType data_type = input->type; - int width = SizeOfDimension(input, 2); - int height = SizeOfDimension(input, 1); - int filter_width = SizeOfDimension(filter, 2); - int filter_height = SizeOfDimension(filter, 1); - - // Per channel quantization is only needed for int8_t inference. For other - // quantized types, only a single scale and zero point is needed. - const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; - // Dynamically allocate per-channel quantization parameters. - data->per_channel_output_multiplier = - reinterpret_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - data->per_channel_output_shift = - reinterpret_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - - // All per-channel quantized tensors need valid zero point and scale arrays. 
- if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, filter->quantization.type, - kTfLiteAffineQuantization); - - const auto* affine_quantization = - reinterpret_cast( - filter->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - TF_LITE_ENSURE(context, affine_quantization->zero_point); - TF_LITE_ENSURE( - context, affine_quantization->scale->size == 1 || - affine_quantization->scale->size == - filter->dims->data[kDepthwiseConvQuantizedDimension]); - TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, - affine_quantization->zero_point->size); - } - - TF_LITE_ENSURE_STATUS(CalculateOpData(context, node, params, width, height, - filter_width, filter_height, data_type, - data)); - - data->input_zero_point = input->params.zero_point; - data->filter_zero_point = filter->params.zero_point; - data->output_zero_point = output->params.zero_point; - - return kTfLiteOk; -} - -void EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) { - float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, - &output_activation_max); - - tflite::DepthwiseParams op_params; - // Padding type is ignored, but still set. - op_params.padding_type = PaddingType::kSame; - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.depth_multiplier = params->depth_multiplier; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - - tflite::reference_ops::DepthwiseConv( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, - const OpData& data, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - DepthwiseParams op_params; - op_params.padding_type = PaddingType::kSame; - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.depth_multiplier = params->depth_multiplier; - op_params.input_offset = -data.input_zero_point; - op_params.weights_offset = 0; - op_params.output_offset = data.output_zero_point; - // TODO(b/130439627): Use calculated value for clamping. 
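// Illustrative sketch (a floating-point stand-in for the fixed-point
// MultiplyByQuantizedMultiplier path used by the real reference kernels; an
// assumption, not code from this diff): per-channel requantization scales each
// channel's accumulator by its own multiplier, adds the output offset, and
// clamps to the activation range that the TODO above refers to.
#include <algorithm>
#include <cmath>
#include <cstdint>

inline int8_t RequantizePerChannel(int32_t acc, double channel_scale,
                                   int32_t output_offset, int32_t act_min,
                                   int32_t act_max) {
  int32_t value =
      static_cast<int32_t>(std::lround(acc * channel_scale)) + output_offset;
  value = std::min(act_max, std::max(act_min, value));
  return static_cast<int8_t>(value);
}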
- op_params.quantized_activation_min = std::numeric_limits::min(); - op_params.quantized_activation_max = std::numeric_limits::max(); - - reference_integer_ops::DepthwiseConvPerChannel( - op_params, data.per_channel_output_multiplier, - data.per_channel_output_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void EvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - const int32_t input_offset = -data.input_zero_point; - const int32_t filter_offset = -data.filter_zero_point; - const int32_t output_offset = data.output_zero_point; - - tflite::DepthwiseParams op_params; - // Padding type is ignored, but still set. - op_params.padding_type = PaddingType::kSame; - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.depth_multiplier = params->depth_multiplier; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - op_params.input_offset = input_offset; - op_params.weights_offset = filter_offset; - op_params.output_offset = output_offset; - op_params.output_multiplier = data.output_multiplier; - // Legacy ops used mixed left and right shifts. Now all are +ve-means-left. - op_params.output_shift = -data.output_shift; - - tflite::reference_ops::DepthwiseConv( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - auto* params = - reinterpret_cast(node->builtin_data); - const OpData& data = *(static_cast(node->user_data)); - - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFilterTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 3) - ? tflite::micro::GetEvalInput(context, node, kBiasTensor) - : nullptr; - - // TODO(aselle): Consider whether float conv and quantized conv should be - // separate ops to avoid dispatch overhead here. - switch (input->type) { // Already know in/out types are same. 
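The quantized Eval paths above pass this multiplier/shift pair down to the reference kernels, which use it to bring each int32 accumulator back into the output's 8-bit range. A simplified sketch of that rescale step (the real gemmlowp/TFLite helpers use saturating doubling and nudged rounding; this rough version assumes -31 < shift < 31 and uses made-up values):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Multiply the accumulator by a Q0.31 multiplier, then shift right by (31 - shift)
// with round-to-nearest.
int32_t RescaleSketch(int32_t acc, int32_t multiplier, int shift) {
  const int total_shift = 31 - shift;                  // assumes 0 < total_shift < 63
  const int64_t rounding = int64_t{1} << (total_shift - 1);
  const int64_t scaled = static_cast<int64_t>(acc) * multiplier + rounding;
  return static_cast<int32_t>(scaled >> total_shift);
}

int main() {
  int32_t out = RescaleSketch(/*acc=*/12345, /*multiplier=*/1518500250, /*shift=*/-7);
  out += 3;                                            // hypothetical output zero point
  out = std::min<int32_t>(127, std::max<int32_t>(-128, out));  // int8 activation clamp
  std::printf("quantized output = %d\n", out);
}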
- case kTfLiteFloat32: - EvalFloat(context, node, params, data, input, filter, bias, output); - break; - case kTfLiteInt8: - EvalQuantizedPerChannel(context, node, params, data, input, filter, bias, - output); - break; - case kTfLiteUInt8: - EvalQuantized(context, node, params, data, input, filter, bias, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_DEPTHWISE_CONV_2D() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/dequantize.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/dequantize.cc deleted file mode 100644 index f4e2eb9f..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/dequantize.cc +++ /dev/null @@ -1,166 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/dequantize.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/quantize.h" -#include "tensorflow/lite/kernels/internal/reference/requantize.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace dequantize { - -struct OpData { - tflite::DequantizationParams quantization_params; - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - int32_t output_zero_point; -}; - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - // TODO(b/140515557): Add cached dequant to improve hybrid model performance. 
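Register_DEPTHWISE_CONV_2D above only fills in a table of function pointers; the micro interpreter later calls them in init -> prepare -> invoke order. The toy harness below mirrors that calling order with a made-up registration struct (it is not the MicroInterpreter API):

#include <cstdio>

// Minimal stand-in for a kernel registration: three stages wired up as function pointers.
struct FakeRegistration {
  void (*init)();
  void (*prepare)();
  void (*invoke)();
};

void MyInit() { std::printf("init: allocate persistent OpData\n"); }
void MyPrepare() { std::printf("prepare: precompute multipliers, request scratch buffers\n"); }
void MyInvoke() { std::printf("invoke: run the kernel over the tensors\n"); }

int main() {
  const FakeRegistration reg{MyInit, MyPrepare, MyInvoke};
  reg.init();     // once, when the op is instantiated
  reg.prepare();  // once, during memory planning
  reg.invoke();   // every inference
}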
- const TfLiteTensor* input = GetInput(context, node, 0); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, 0); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE(context, input->type == kTfLiteUInt8 || - input->type == kTfLiteInt8 || - input->type == kTfLiteInt16); - TF_LITE_ENSURE( - context, output->type == kTfLiteFloat32 || output->type == kTfLiteInt32); - - if (output->type == kTfLiteInt32) { - const double effective_output_scale = - static_cast(input->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(effective_output_scale, &data->output_multiplier, - &data->output_shift); - } - - data->quantization_params.zero_point = input->params.zero_point; - data->quantization_params.scale = static_cast(input->params.scale); - data->output_zero_point = output->params.zero_point; - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - - if (output->type == kTfLiteFloat32) { - switch (input->type) { - case kTfLiteUInt8: - reference_ops::Dequantize(data->quantization_params, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - case kTfLiteInt8: - reference_ops::Dequantize(data->quantization_params, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - case kTfLiteInt16: - reference_ops::Dequantize(data->quantization_params, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else if (output->type == kTfLiteInt32) { - int flat_size = MatchingFlatSize(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorShape(output)); - switch (input->type) { - case kTfLiteInt16: { - reference_ops::Requantize( - tflite::micro::GetTensorData(input), flat_size, - data->output_multiplier, data->output_shift, - data->quantization_params.zero_point, data->output_zero_point, - tflite::micro::GetTensorData(output)); - break; - } - case kTfLiteInt8: { - reference_ops::Requantize( - tflite::micro::GetTensorData(input), flat_size, - data->output_multiplier, data->output_shift, - data->quantization_params.zero_point, data->output_zero_point, - tflite::micro::GetTensorData(output)); - break; - } - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else { - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - - return kTfLiteOk; -} - -} // namespace dequantize - -TfLiteRegistration Register_DEQUANTIZE() { - return {/*init=*/dequantize::Init, - /*free=*/nullptr, - /*prepare=*/dequantize::Prepare, - /*invoke=*/dequantize::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - 
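The Prepare/Eval pair above reduces to the affine dequantization formula real = scale * (q - zero_point), plus a multiplier/shift requantize path when the output is int32. A tiny self-contained illustration with made-up scale and zero point:

#include <cstdint>
#include <cstdio>

int main() {
  const float scale = 0.05f;      // hypothetical input scale
  const int32_t zero_point = -5;  // hypothetical input zero point
  const int8_t q[4] = {-128, -5, 0, 127};
  for (int8_t v : q) {
    const float real = scale * (static_cast<int32_t>(v) - zero_point);
    std::printf("q=%4d -> %.2f\n", v, real);
  }
  // For int8/int16 -> int32, the kernel instead requantizes with the ratio
  // input_scale / output_scale, folded into the multiplier/shift pair in Prepare().
}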
/*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/detection_postprocess.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/detection_postprocess.cc deleted file mode 100644 index b1f68eba..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/detection_postprocess.cc +++ /dev/null @@ -1,805 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include - -#define FLATBUFFERS_LOCALE_INDEPENDENT 0 -#include "flatbuffers/flexbuffers.h" -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace { - -/** - * This version of detection_postprocess is specific to TFLite Micro. It - * contains the following differences between the TFLite version: - * - * 1.) Temporaries (temporary tensors) - Micro use instead scratch buffer API. - * 2.) Output dimensions - the TFLite version does not support undefined out - * dimensions. So model must have static out dimensions. - */ - -// Input tensors -constexpr int kInputTensorBoxEncodings = 0; -constexpr int kInputTensorClassPredictions = 1; -constexpr int kInputTensorAnchors = 2; - -// Output tensors -constexpr int kOutputTensorDetectionBoxes = 0; -constexpr int kOutputTensorDetectionClasses = 1; -constexpr int kOutputTensorDetectionScores = 2; -constexpr int kOutputTensorNumDetections = 3; - -constexpr int kNumCoordBox = 4; -constexpr int kBatchSize = 1; - -constexpr int kNumDetectionsPerClass = 100; - -// Object Detection model produces axis-aligned boxes in two formats: -// BoxCorner represents the lower left corner (xmin, ymin) and -// the upper right corner (xmax, ymax). -// CenterSize represents the center (xcenter, ycenter), height and width. 
-// BoxCornerEncoding and CenterSizeEncoding are related as follows: -// ycenter = y / y_scale * anchor.h + anchor.y; -// xcenter = x / x_scale * anchor.w + anchor.x; -// half_h = 0.5*exp(h/ h_scale)) * anchor.h; -// half_w = 0.5*exp(w / w_scale)) * anchor.w; -// ymin = ycenter - half_h -// ymax = ycenter + half_h -// xmin = xcenter - half_w -// xmax = xcenter + half_w -struct BoxCornerEncoding { - float ymin; - float xmin; - float ymax; - float xmax; -}; - -struct CenterSizeEncoding { - float y; - float x; - float h; - float w; -}; -// We make sure that the memory allocations are contiguous with static CHECK. -static_assert(sizeof(BoxCornerEncoding) == sizeof(float) * kNumCoordBox, - "Size of BoxCornerEncoding is 4 float values"); -static_assert(sizeof(CenterSizeEncoding) == sizeof(float) * kNumCoordBox, - "Size of CenterSizeEncoding is 4 float values"); - -struct OpData { - int max_detections; - int max_classes_per_detection; // Fast Non-Max-Suppression - int detections_per_class; // Regular Non-Max-Suppression - float non_max_suppression_score_threshold; - float intersection_over_union_threshold; - int num_classes; - bool use_regular_non_max_suppression; - CenterSizeEncoding scale_values; - - // Scratch buffers indexes - int active_candidate_idx; - int decoded_boxes_idx; - int scores_idx; - int score_buffer_idx; - int keep_scores_idx; - int scores_after_regular_non_max_suppression_idx; - int sorted_values_idx; - int keep_indices_idx; - int sorted_indices_idx; - int buffer_idx; - int selected_idx; - - // Cached tensor scale and zero point values for quantized operations - TfLiteQuantizationParams input_box_encodings; - TfLiteQuantizationParams input_class_predictions; - TfLiteQuantizationParams input_anchors; -}; - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - OpData* op_data = nullptr; - - const uint8_t* buffer_t = reinterpret_cast(buffer); - const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); - - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - op_data = reinterpret_cast( - context->AllocatePersistentBuffer(context, sizeof(OpData))); - - op_data->max_detections = m["max_detections"].AsInt32(); - op_data->max_classes_per_detection = m["max_classes_per_detection"].AsInt32(); - if (m["detections_per_class"].IsNull()) - op_data->detections_per_class = kNumDetectionsPerClass; - else - op_data->detections_per_class = m["detections_per_class"].AsInt32(); - if (m["use_regular_nms"].IsNull()) - op_data->use_regular_non_max_suppression = false; - else - op_data->use_regular_non_max_suppression = m["use_regular_nms"].AsBool(); - - op_data->non_max_suppression_score_threshold = - m["nms_score_threshold"].AsFloat(); - op_data->intersection_over_union_threshold = m["nms_iou_threshold"].AsFloat(); - op_data->num_classes = m["num_classes"].AsInt32(); - op_data->scale_values.y = m["y_scale"].AsFloat(); - op_data->scale_values.x = m["x_scale"].AsFloat(); - op_data->scale_values.h = m["h_scale"].AsFloat(); - op_data->scale_values.w = m["w_scale"].AsFloat(); - - return op_data; -} - -void Free(TfLiteContext* context, void* buffer) {} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - auto* op_data = static_cast(node->user_data); - - // Inputs: box_encodings, scores, anchors - TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); - const TfLiteTensor* input_box_encodings = - GetInput(context, node, kInputTensorBoxEncodings); - const TfLiteTensor* input_class_predictions = - GetInput(context, node, kInputTensorClassPredictions); 
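The comment above gives the CenterSize -> BoxCorner decode equations the kernel applies per anchor. A standalone version of that math, using made-up anchor and scale values:

#include <cmath>
#include <cstdio>

struct CenterSize { float y, x, h, w; };
struct BoxCorner { float ymin, xmin, ymax, xmax; };

BoxCorner Decode(const CenterSize& box, const CenterSize& anchor,
                 const CenterSize& scale) {
  const float ycenter = box.y / scale.y * anchor.h + anchor.y;
  const float xcenter = box.x / scale.x * anchor.w + anchor.x;
  const float half_h = 0.5f * std::exp(box.h / scale.h) * anchor.h;
  const float half_w = 0.5f * std::exp(box.w / scale.w) * anchor.w;
  return {ycenter - half_h, xcenter - half_w, ycenter + half_h, xcenter + half_w};
}

int main() {
  const CenterSize scale{10.f, 10.f, 5.f, 5.f};     // y/x/h/w scales from the op attributes
  const CenterSize anchor{0.5f, 0.5f, 0.2f, 0.2f};
  const CenterSize encoded{1.0f, -1.0f, 0.5f, 0.5f};
  const BoxCorner b = Decode(encoded, anchor, scale);
  std::printf("ymin=%.3f xmin=%.3f ymax=%.3f xmax=%.3f\n", b.ymin, b.xmin, b.ymax, b.xmax);
}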
- const TfLiteTensor* input_anchors = - GetInput(context, node, kInputTensorAnchors); - TF_LITE_ENSURE_EQ(context, NumDimensions(input_box_encodings), 3); - TF_LITE_ENSURE_EQ(context, NumDimensions(input_class_predictions), 3); - TF_LITE_ENSURE_EQ(context, NumDimensions(input_anchors), 2); - - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 4); - const int num_boxes = input_box_encodings->dims->data[1]; - const int num_classes = op_data->num_classes; - - op_data->input_box_encodings.scale = input_box_encodings->params.scale; - op_data->input_box_encodings.zero_point = - input_box_encodings->params.zero_point; - op_data->input_class_predictions.scale = - input_class_predictions->params.scale; - op_data->input_class_predictions.zero_point = - input_class_predictions->params.zero_point; - op_data->input_anchors.scale = input_anchors->params.scale; - op_data->input_anchors.zero_point = input_anchors->params.zero_point; - - // Scratch tensors - context->RequestScratchBufferInArena(context, num_boxes, - &op_data->active_candidate_idx); - context->RequestScratchBufferInArena(context, - num_boxes * kNumCoordBox * sizeof(float), - &op_data->decoded_boxes_idx); - context->RequestScratchBufferInArena( - context, - input_class_predictions->dims->data[1] * - input_class_predictions->dims->data[2] * sizeof(float), - &op_data->scores_idx); - - // Additional buffers - context->RequestScratchBufferInArena(context, num_boxes * sizeof(float), - &op_data->score_buffer_idx); - context->RequestScratchBufferInArena(context, num_boxes * sizeof(float), - &op_data->keep_scores_idx); - context->RequestScratchBufferInArena( - context, op_data->max_detections * num_boxes * sizeof(float), - &op_data->scores_after_regular_non_max_suppression_idx); - context->RequestScratchBufferInArena( - context, op_data->max_detections * num_boxes * sizeof(float), - &op_data->sorted_values_idx); - context->RequestScratchBufferInArena(context, num_boxes * sizeof(int), - &op_data->keep_indices_idx); - context->RequestScratchBufferInArena( - context, op_data->max_detections * num_boxes * sizeof(int), - &op_data->sorted_indices_idx); - int buffer_size = std::max(num_classes, op_data->max_detections); - context->RequestScratchBufferInArena( - context, buffer_size * num_boxes * sizeof(int), &op_data->buffer_idx); - buffer_size = std::min(num_boxes, op_data->max_detections); - context->RequestScratchBufferInArena( - context, buffer_size * num_boxes * sizeof(int), &op_data->selected_idx); - - // Outputs: detection_boxes, detection_scores, detection_classes, - // num_detections - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 4); - - return kTfLiteOk; -} - -class Dequantizer { - public: - Dequantizer(int zero_point, float scale) - : zero_point_(zero_point), scale_(scale) {} - float operator()(uint8_t x) { - return (static_cast(x) - zero_point_) * scale_; - } - - private: - int zero_point_; - float scale_; -}; - -void DequantizeBoxEncodings(const TfLiteEvalTensor* input_box_encodings, - int idx, float quant_zero_point, float quant_scale, - int length_box_encoding, - CenterSizeEncoding* box_centersize) { - const uint8_t* boxes = - tflite::micro::GetTensorData(input_box_encodings) + - length_box_encoding * idx; - Dequantizer dequantize(quant_zero_point, quant_scale); - // See definition of the KeyPointBoxCoder at - // https://github.com/tensorflow/models/blob/master/research/object_detection/box_coders/keypoint_box_coder.py - // The first four elements are the box coordinates, which is the same as the - // FastRnnBoxCoder at - // 
https://github.com/tensorflow/models/blob/master/research/object_detection/box_coders/faster_rcnn_box_coder.py - box_centersize->y = dequantize(boxes[0]); - box_centersize->x = dequantize(boxes[1]); - box_centersize->h = dequantize(boxes[2]); - box_centersize->w = dequantize(boxes[3]); -} - -template -T ReInterpretTensor(const TfLiteEvalTensor* tensor) { - const float* tensor_base = tflite::micro::GetTensorData(tensor); - return reinterpret_cast(tensor_base); -} - -template -T ReInterpretTensor(TfLiteEvalTensor* tensor) { - float* tensor_base = tflite::micro::GetTensorData(tensor); - return reinterpret_cast(tensor_base); -} - -TfLiteStatus DecodeCenterSizeBoxes(TfLiteContext* context, TfLiteNode* node, - OpData* op_data) { - // Parse input tensor boxencodings - const TfLiteEvalTensor* input_box_encodings = - tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); - TF_LITE_ENSURE_EQ(context, input_box_encodings->dims->data[0], kBatchSize); - const int num_boxes = input_box_encodings->dims->data[1]; - TF_LITE_ENSURE(context, input_box_encodings->dims->data[2] >= kNumCoordBox); - const TfLiteEvalTensor* input_anchors = - tflite::micro::GetEvalInput(context, node, kInputTensorAnchors); - - // Decode the boxes to get (ymin, xmin, ymax, xmax) based on the anchors - CenterSizeEncoding box_centersize; - CenterSizeEncoding scale_values = op_data->scale_values; - CenterSizeEncoding anchor; - for (int idx = 0; idx < num_boxes; ++idx) { - switch (input_box_encodings->type) { - // Quantized - case kTfLiteUInt8: - DequantizeBoxEncodings( - input_box_encodings, idx, - static_cast(op_data->input_box_encodings.zero_point), - static_cast(op_data->input_box_encodings.scale), - input_box_encodings->dims->data[2], &box_centersize); - DequantizeBoxEncodings( - input_anchors, idx, - static_cast(op_data->input_anchors.zero_point), - static_cast(op_data->input_anchors.scale), kNumCoordBox, - &anchor); - break; - // Float - case kTfLiteFloat32: { - // Please see DequantizeBoxEncodings function for the support detail. - const int box_encoding_idx = idx * input_box_encodings->dims->data[2]; - const float* boxes = &(tflite::micro::GetTensorData( - input_box_encodings)[box_encoding_idx]); - box_centersize = *reinterpret_cast(boxes); - anchor = - ReInterpretTensor(input_anchors)[idx]; - break; - } - default: - // Unsupported type. 
- return kTfLiteError; - } - - float ycenter = static_cast(static_cast(box_centersize.y) / - static_cast(scale_values.y) * - static_cast(anchor.h) + - static_cast(anchor.y)); - - float xcenter = static_cast(static_cast(box_centersize.x) / - static_cast(scale_values.x) * - static_cast(anchor.w) + - static_cast(anchor.x)); - - float half_h = - static_cast(0.5 * - (std::exp(static_cast(box_centersize.h) / - static_cast(scale_values.h))) * - static_cast(anchor.h)); - float half_w = - static_cast(0.5 * - (std::exp(static_cast(box_centersize.w) / - static_cast(scale_values.w))) * - static_cast(anchor.w)); - - float* decoded_boxes = reinterpret_cast( - context->GetScratchBuffer(context, op_data->decoded_boxes_idx)); - auto& box = reinterpret_cast(decoded_boxes)[idx]; - box.ymin = ycenter - half_h; - box.xmin = xcenter - half_w; - box.ymax = ycenter + half_h; - box.xmax = xcenter + half_w; - } - return kTfLiteOk; -} - -void DecreasingPartialArgSort(const float* values, int num_values, - int num_to_sort, int* indices) { - std::iota(indices, indices + num_values, 0); - std::partial_sort( - indices, indices + num_to_sort, indices + num_values, - [&values](const int i, const int j) { return values[i] > values[j]; }); -} - -int SelectDetectionsAboveScoreThreshold(const float* values, int size, - const float threshold, - float* keep_values, int* keep_indices) { - int counter = 0; - for (int i = 0; i < size; i++) { - if (values[i] >= threshold) { - keep_values[counter] = values[i]; - keep_indices[counter] = i; - counter++; - } - } - return counter; -} - -bool ValidateBoxes(const float* decoded_boxes, const int num_boxes) { - for (int i = 0; i < num_boxes; ++i) { - // ymax>=ymin, xmax>=xmin - auto& box = reinterpret_cast(decoded_boxes)[i]; - if (box.ymin >= box.ymax || box.xmin >= box.xmax) { - return false; - } - } - return true; -} - -float ComputeIntersectionOverUnion(const float* decoded_boxes, const int i, - const int j) { - auto& box_i = reinterpret_cast(decoded_boxes)[i]; - auto& box_j = reinterpret_cast(decoded_boxes)[j]; - const float area_i = (box_i.ymax - box_i.ymin) * (box_i.xmax - box_i.xmin); - const float area_j = (box_j.ymax - box_j.ymin) * (box_j.xmax - box_j.xmin); - if (area_i <= 0 || area_j <= 0) return 0.0; - const float intersection_ymin = std::max(box_i.ymin, box_j.ymin); - const float intersection_xmin = std::max(box_i.xmin, box_j.xmin); - const float intersection_ymax = std::min(box_i.ymax, box_j.ymax); - const float intersection_xmax = std::min(box_i.xmax, box_j.xmax); - const float intersection_area = - std::max(intersection_ymax - intersection_ymin, 0.0) * - std::max(intersection_xmax - intersection_xmin, 0.0); - return intersection_area / (area_i + area_j - intersection_area); -} - -// NonMaxSuppressionSingleClass() prunes out the box locations with high overlap -// before selecting the highest scoring boxes (max_detections in number) -// It assumes all boxes are good in beginning and sorts based on the scores. -// If lower-scoring box has too much overlap with a higher-scoring box, -// we get rid of the lower-scoring box. 
-// Complexity is O(N^2) pairwise comparison between boxes -TfLiteStatus NonMaxSuppressionSingleClassHelper( - TfLiteContext* context, TfLiteNode* node, OpData* op_data, - const float* scores, int* selected, int* selected_size, - int max_detections) { - const TfLiteEvalTensor* input_box_encodings = - tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); - const int num_boxes = input_box_encodings->dims->data[1]; - const float non_max_suppression_score_threshold = - op_data->non_max_suppression_score_threshold; - const float intersection_over_union_threshold = - op_data->intersection_over_union_threshold; - // Maximum detections should be positive. - TF_LITE_ENSURE(context, (max_detections >= 0)); - // intersection_over_union_threshold should be positive - // and should be less than 1. - TF_LITE_ENSURE(context, (intersection_over_union_threshold > 0.0f) && - (intersection_over_union_threshold <= 1.0f)); - // Validate boxes - float* decoded_boxes = reinterpret_cast( - context->GetScratchBuffer(context, op_data->decoded_boxes_idx)); - - TF_LITE_ENSURE(context, ValidateBoxes(decoded_boxes, num_boxes)); - - // threshold scores - int* keep_indices = reinterpret_cast( - context->GetScratchBuffer(context, op_data->keep_indices_idx)); - float* keep_scores = reinterpret_cast( - context->GetScratchBuffer(context, op_data->keep_scores_idx)); - int num_scores_kept = SelectDetectionsAboveScoreThreshold( - scores, num_boxes, non_max_suppression_score_threshold, keep_scores, - keep_indices); - int* sorted_indices = reinterpret_cast( - context->GetScratchBuffer(context, op_data->sorted_indices_idx)); - - DecreasingPartialArgSort(keep_scores, num_scores_kept, num_scores_kept, - sorted_indices); - - const int num_boxes_kept = num_scores_kept; - const int output_size = std::min(num_boxes_kept, max_detections); - *selected_size = 0; - - int num_active_candidate = num_boxes_kept; - uint8_t* active_box_candidate = reinterpret_cast( - context->GetScratchBuffer(context, op_data->active_candidate_idx)); - - for (int row = 0; row < num_boxes_kept; row++) { - active_box_candidate[row] = 1; - } - for (int i = 0; i < num_boxes_kept; ++i) { - if (num_active_candidate == 0 || *selected_size >= output_size) break; - if (active_box_candidate[i] == 1) { - selected[(*selected_size)++] = keep_indices[sorted_indices[i]]; - active_box_candidate[i] = 0; - num_active_candidate--; - } else { - continue; - } - for (int j = i + 1; j < num_boxes_kept; ++j) { - if (active_box_candidate[j] == 1) { - float intersection_over_union = ComputeIntersectionOverUnion( - decoded_boxes, keep_indices[sorted_indices[i]], - keep_indices[sorted_indices[j]]); - - if (intersection_over_union > intersection_over_union_threshold) { - active_box_candidate[j] = 0; - num_active_candidate--; - } - } - } - } - - return kTfLiteOk; -} - -// This function implements a regular version of Non Maximal Suppression (NMS) -// for multiple classes where -// 1) we do NMS separately for each class across all anchors and -// 2) keep only the highest anchor scores across all classes -// 3) The worst runtime of the regular NMS is O(K*N^2) -// where N is the number of anchors and K the number of -// classes. 
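NonMaxSuppressionSingleClassHelper above implements the classic greedy NMS loop: sort candidates by score, keep a box, and suppress later boxes whose IoU with it exceeds the threshold. A compact standalone version of the same idea (plain std containers instead of scratch buffers; not the kernel's code):

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

struct Box { float ymin, xmin, ymax, xmax; };

float IoU(const Box& a, const Box& b) {
  const float area_a = (a.ymax - a.ymin) * (a.xmax - a.xmin);
  const float area_b = (b.ymax - b.ymin) * (b.xmax - b.xmin);
  if (area_a <= 0.f || area_b <= 0.f) return 0.f;
  const float iy = std::max(0.f, std::min(a.ymax, b.ymax) - std::max(a.ymin, b.ymin));
  const float ix = std::max(0.f, std::min(a.xmax, b.xmax) - std::max(a.xmin, b.xmin));
  const float inter = iy * ix;
  return inter / (area_a + area_b - inter);
}

std::vector<int> GreedyNms(const std::vector<Box>& boxes,
                           const std::vector<float>& scores,
                           float iou_threshold, int max_detections) {
  std::vector<int> order(boxes.size());
  std::iota(order.begin(), order.end(), 0);
  std::sort(order.begin(), order.end(),
            [&](int i, int j) { return scores[i] > scores[j]; });
  std::vector<int> kept;
  std::vector<bool> suppressed(boxes.size(), false);
  for (int i : order) {
    if (suppressed[i]) continue;
    kept.push_back(i);
    if (static_cast<int>(kept.size()) >= max_detections) break;
    for (int j : order)
      if (!suppressed[j] && j != i && IoU(boxes[i], boxes[j]) > iou_threshold)
        suppressed[j] = true;
  }
  return kept;
}

int main() {
  const std::vector<Box> boxes = {{0, 0, 1, 1}, {0, 0.1f, 1, 1.1f}, {2, 2, 3, 3}};
  const std::vector<float> scores = {0.9f, 0.8f, 0.7f};
  for (int i : GreedyNms(boxes, scores, 0.5f, 10)) std::printf("kept box %d\n", i);
}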
-TfLiteStatus NonMaxSuppressionMultiClassRegularHelper(TfLiteContext* context, - TfLiteNode* node, - OpData* op_data, - const float* scores) { - const TfLiteEvalTensor* input_box_encodings = - tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); - const TfLiteEvalTensor* input_class_predictions = - tflite::micro::GetEvalInput(context, node, kInputTensorClassPredictions); - TfLiteEvalTensor* detection_boxes = - tflite::micro::GetEvalOutput(context, node, kOutputTensorDetectionBoxes); - TfLiteEvalTensor* detection_classes = tflite::micro::GetEvalOutput( - context, node, kOutputTensorDetectionClasses); - TfLiteEvalTensor* detection_scores = - tflite::micro::GetEvalOutput(context, node, kOutputTensorDetectionScores); - TfLiteEvalTensor* num_detections = - tflite::micro::GetEvalOutput(context, node, kOutputTensorNumDetections); - - const int num_boxes = input_box_encodings->dims->data[1]; - const int num_classes = op_data->num_classes; - const int num_detections_per_class = op_data->detections_per_class; - const int max_detections = op_data->max_detections; - const int num_classes_with_background = - input_class_predictions->dims->data[2]; - // The row index offset is 1 if background class is included and 0 otherwise. - int label_offset = num_classes_with_background - num_classes; - TF_LITE_ENSURE(context, num_detections_per_class > 0); - - // For each class, perform non-max suppression. - float* class_scores = reinterpret_cast( - context->GetScratchBuffer(context, op_data->score_buffer_idx)); - int* box_indices_after_regular_non_max_suppression = reinterpret_cast( - context->GetScratchBuffer(context, op_data->buffer_idx)); - float* scores_after_regular_non_max_suppression = - reinterpret_cast(context->GetScratchBuffer( - context, op_data->scores_after_regular_non_max_suppression_idx)); - - int size_of_sorted_indices = 0; - int* sorted_indices = reinterpret_cast( - context->GetScratchBuffer(context, op_data->sorted_indices_idx)); - float* sorted_values = reinterpret_cast( - context->GetScratchBuffer(context, op_data->sorted_values_idx)); - - for (int col = 0; col < num_classes; col++) { - for (int row = 0; row < num_boxes; row++) { - // Get scores of boxes corresponding to all anchors for single class - class_scores[row] = - *(scores + row * num_classes_with_background + col + label_offset); - } - // Perform non-maximal suppression on single class - int selected_size = 0; - int* selected = reinterpret_cast( - context->GetScratchBuffer(context, op_data->selected_idx)); - TF_LITE_ENSURE_STATUS(NonMaxSuppressionSingleClassHelper( - context, node, op_data, class_scores, selected, &selected_size, - num_detections_per_class)); - // Add selected indices from non-max suppression of boxes in this class - int output_index = size_of_sorted_indices; - for (int i = 0; i < selected_size; i++) { - int selected_index = selected[i]; - - box_indices_after_regular_non_max_suppression[output_index] = - (selected_index * num_classes_with_background + col + label_offset); - scores_after_regular_non_max_suppression[output_index] = - class_scores[selected_index]; - output_index++; - } - // Sort the max scores among the selected indices - // Get the indices for top scores - int num_indices_to_sort = std::min(output_index, max_detections); - DecreasingPartialArgSort(scores_after_regular_non_max_suppression, - output_index, num_indices_to_sort, sorted_indices); - - // Copy values to temporary vectors - for (int row = 0; row < num_indices_to_sort; row++) { - int temp = sorted_indices[row]; - 
sorted_indices[row] = box_indices_after_regular_non_max_suppression[temp]; - sorted_values[row] = scores_after_regular_non_max_suppression[temp]; - } - // Copy scores and indices from temporary vectors - for (int row = 0; row < num_indices_to_sort; row++) { - box_indices_after_regular_non_max_suppression[row] = sorted_indices[row]; - scores_after_regular_non_max_suppression[row] = sorted_values[row]; - } - size_of_sorted_indices = num_indices_to_sort; - } - - // Allocate output tensors - for (int output_box_index = 0; output_box_index < max_detections; - output_box_index++) { - if (output_box_index < size_of_sorted_indices) { - const int anchor_index = floor( - box_indices_after_regular_non_max_suppression[output_box_index] / - num_classes_with_background); - const int class_index = - box_indices_after_regular_non_max_suppression[output_box_index] - - anchor_index * num_classes_with_background - label_offset; - const float selected_score = - scores_after_regular_non_max_suppression[output_box_index]; - // detection_boxes - float* decoded_boxes = reinterpret_cast( - context->GetScratchBuffer(context, op_data->decoded_boxes_idx)); - ReInterpretTensor(detection_boxes)[output_box_index] = - reinterpret_cast(decoded_boxes)[anchor_index]; - // detection_classes - tflite::micro::GetTensorData(detection_classes)[output_box_index] = - class_index; - // detection_scores - tflite::micro::GetTensorData(detection_scores)[output_box_index] = - selected_score; - } else { - ReInterpretTensor( - detection_boxes)[output_box_index] = {0.0f, 0.0f, 0.0f, 0.0f}; - // detection_classes - tflite::micro::GetTensorData(detection_classes)[output_box_index] = - 0.0f; - // detection_scores - tflite::micro::GetTensorData(detection_scores)[output_box_index] = - 0.0f; - } - } - tflite::micro::GetTensorData(num_detections)[0] = - size_of_sorted_indices; - - return kTfLiteOk; -} - -// This function implements a fast version of Non Maximal Suppression for -// multiple classes where -// 1) we keep the top-k scores for each anchor and -// 2) during NMS, each anchor only uses the highest class score for sorting. -// 3) Compared to standard NMS, the worst runtime of this version is O(N^2) -// instead of O(KN^2) where N is the number of anchors and K the number of -// classes. -TfLiteStatus NonMaxSuppressionMultiClassFastHelper(TfLiteContext* context, - TfLiteNode* node, - OpData* op_data, - const float* scores) { - const TfLiteEvalTensor* input_box_encodings = - tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); - const TfLiteEvalTensor* input_class_predictions = - tflite::micro::GetEvalInput(context, node, kInputTensorClassPredictions); - TfLiteEvalTensor* detection_boxes = - tflite::micro::GetEvalOutput(context, node, kOutputTensorDetectionBoxes); - - TfLiteEvalTensor* detection_classes = tflite::micro::GetEvalOutput( - context, node, kOutputTensorDetectionClasses); - TfLiteEvalTensor* detection_scores = - tflite::micro::GetEvalOutput(context, node, kOutputTensorDetectionScores); - TfLiteEvalTensor* num_detections = - tflite::micro::GetEvalOutput(context, node, kOutputTensorNumDetections); - - const int num_boxes = input_box_encodings->dims->data[1]; - const int num_classes = op_data->num_classes; - const int max_categories_per_anchor = op_data->max_classes_per_detection; - const int num_classes_with_background = - input_class_predictions->dims->data[2]; - - // The row index offset is 1 if background class is included and 0 otherwise. 
- int label_offset = num_classes_with_background - num_classes; - TF_LITE_ENSURE(context, (max_categories_per_anchor > 0)); - const int num_categories_per_anchor = - std::min(max_categories_per_anchor, num_classes); - float* max_scores = reinterpret_cast( - context->GetScratchBuffer(context, op_data->score_buffer_idx)); - int* sorted_class_indices = reinterpret_cast( - context->GetScratchBuffer(context, op_data->buffer_idx)); - - for (int row = 0; row < num_boxes; row++) { - const float* box_scores = - scores + row * num_classes_with_background + label_offset; - int* class_indices = sorted_class_indices + row * num_classes; - DecreasingPartialArgSort(box_scores, num_classes, num_categories_per_anchor, - class_indices); - max_scores[row] = box_scores[class_indices[0]]; - } - - // Perform non-maximal suppression on max scores - int selected_size = 0; - int* selected = reinterpret_cast( - context->GetScratchBuffer(context, op_data->selected_idx)); - TF_LITE_ENSURE_STATUS(NonMaxSuppressionSingleClassHelper( - context, node, op_data, max_scores, selected, &selected_size, - op_data->max_detections)); - - // Allocate output tensors - int output_box_index = 0; - - for (int i = 0; i < selected_size; i++) { - int selected_index = selected[i]; - - const float* box_scores = - scores + selected_index * num_classes_with_background + label_offset; - const int* class_indices = - sorted_class_indices + selected_index * num_classes; - - for (int col = 0; col < num_categories_per_anchor; ++col) { - int box_offset = num_categories_per_anchor * output_box_index + col; - - // detection_boxes - float* decoded_boxes = reinterpret_cast( - context->GetScratchBuffer(context, op_data->decoded_boxes_idx)); - ReInterpretTensor(detection_boxes)[box_offset] = - reinterpret_cast(decoded_boxes)[selected_index]; - - // detection_classes - tflite::micro::GetTensorData(detection_classes)[box_offset] = - class_indices[col]; - - // detection_scores - tflite::micro::GetTensorData(detection_scores)[box_offset] = - box_scores[class_indices[col]]; - - output_box_index++; - } - } - - tflite::micro::GetTensorData(num_detections)[0] = output_box_index; - return kTfLiteOk; -} - -void DequantizeClassPredictions(const TfLiteEvalTensor* input_class_predictions, - const int num_boxes, - const int num_classes_with_background, - float* scores, OpData* op_data) { - float quant_zero_point = - static_cast(op_data->input_class_predictions.zero_point); - float quant_scale = - static_cast(op_data->input_class_predictions.scale); - Dequantizer dequantize(quant_zero_point, quant_scale); - const uint8_t* scores_quant = - tflite::micro::GetTensorData(input_class_predictions); - for (int idx = 0; idx < num_boxes * num_classes_with_background; ++idx) { - scores[idx] = dequantize(scores_quant[idx]); - } -} - -TfLiteStatus NonMaxSuppressionMultiClass(TfLiteContext* context, - TfLiteNode* node, OpData* op_data) { - // Get the input tensors - const TfLiteEvalTensor* input_box_encodings = - tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); - const TfLiteEvalTensor* input_class_predictions = - tflite::micro::GetEvalInput(context, node, kInputTensorClassPredictions); - const int num_boxes = input_box_encodings->dims->data[1]; - const int num_classes = op_data->num_classes; - - TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[0], - kBatchSize); - TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[1], num_boxes); - const int num_classes_with_background = - input_class_predictions->dims->data[2]; - - 
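The "fast" path above keeps only the top max_classes_per_detection class scores per anchor before running a single NMS pass; that per-anchor selection is just a partial argsort over the class scores. A standalone sketch of the selection step:

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

// Return the indices of the k highest-scoring classes for one anchor.
std::vector<int> TopKClasses(const std::vector<float>& class_scores, int k) {
  std::vector<int> idx(class_scores.size());
  std::iota(idx.begin(), idx.end(), 0);
  std::partial_sort(idx.begin(), idx.begin() + k, idx.end(),
                    [&](int a, int b) { return class_scores[a] > class_scores[b]; });
  idx.resize(k);
  return idx;
}

int main() {
  const std::vector<float> scores = {0.05f, 0.70f, 0.10f, 0.60f};  // one anchor, 4 classes
  for (int c : TopKClasses(scores, 2)) std::printf("class %d score %.2f\n", c, scores[c]);
}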
TF_LITE_ENSURE(context, (num_classes_with_background - num_classes <= 1)); - TF_LITE_ENSURE(context, (num_classes_with_background >= num_classes)); - - const float* scores; - switch (input_class_predictions->type) { - case kTfLiteUInt8: { - float* temporary_scores = reinterpret_cast( - context->GetScratchBuffer(context, op_data->scores_idx)); - DequantizeClassPredictions(input_class_predictions, num_boxes, - num_classes_with_background, temporary_scores, - op_data); - scores = temporary_scores; - } break; - case kTfLiteFloat32: - scores = tflite::micro::GetTensorData(input_class_predictions); - break; - default: - // Unsupported type. - return kTfLiteError; - } - - if (op_data->use_regular_non_max_suppression) { - TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClassRegularHelper( - context, node, op_data, scores)); - } else { - TF_LITE_ENSURE_STATUS( - NonMaxSuppressionMultiClassFastHelper(context, node, op_data, scores)); - } - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE(context, (kBatchSize == 1)); - auto* op_data = static_cast(node->user_data); - - // These two functions correspond to two blocks in the Object Detection model. - // In future, we would like to break the custom op in two blocks, which is - // currently not feasible because we would like to input quantized inputs - // and do all calculations in float. Mixed quantized/float calculations are - // currently not supported in TFLite. - - // This fills in temporary decoded_boxes - // by transforming input_box_encodings and input_anchors from - // CenterSizeEncodings to BoxCornerEncoding - TF_LITE_ENSURE_STATUS(DecodeCenterSizeBoxes(context, node, op_data)); - - // This fills in the output tensors - // by choosing effective set of decoded boxes - // based on Non Maximal Suppression, i.e. selecting - // highest scoring non-overlapping boxes. - TF_LITE_ENSURE_STATUS(NonMaxSuppressionMultiClass(context, node, op_data)); - - return kTfLiteOk; -} -} // namespace - -TfLiteRegistration* Register_DETECTION_POSTPROCESS() { - static TfLiteRegistration r = {/*init=*/Init, - /*free=*/Free, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; - return &r; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/elementwise.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/elementwise.cc deleted file mode 100644 index 581e532b..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/elementwise.cc +++ /dev/null @@ -1,214 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace elementwise { -namespace { - -bool IsNumericSupportedType(const TfLiteType type) { - return type == kTfLiteFloat32; -} - -bool IsLogicalSupportedType(const TfLiteType type) { - return type == kTfLiteBool; -} - -typedef bool (*IsSupportedType)(TfLiteType); -template -TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, 0); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, 0); - TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - if (!IsSupportedType(input->type)) { - TF_LITE_KERNEL_LOG(context, "Input data type %s (%d) is not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -template -inline TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node, - T func(T), TfLiteType expected_type) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, expected_type); - const size_t num_elements = ElementCount(*input->dims); - const T* in_data = tflite::micro::GetTensorData(input); - T* out_data = tflite::micro::GetTensorData(output); - for (size_t i = 0; i < num_elements; ++i) { - out_data[i] = func(in_data[i]); - } - return kTfLiteOk; -} - -inline TfLiteStatus EvalNumeric(TfLiteContext* context, TfLiteNode* node, - float float_func(float)) { - return EvalImpl(context, node, float_func, kTfLiteFloat32); -} - -inline TfLiteStatus EvalLogical(TfLiteContext* context, TfLiteNode* node, - bool bool_func(bool)) { - return EvalImpl(context, node, bool_func, kTfLiteBool); -} - -TfLiteStatus AbsEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::abs); -} - -TfLiteStatus SinEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::sin); -} - -TfLiteStatus CosEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::cos); -} - -TfLiteStatus LogEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::log); -} - -TfLiteStatus SqrtEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::sqrt); -} - -TfLiteStatus RsqrtEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, [](float f) { return 1.f / std::sqrt(f); }); -} - -TfLiteStatus SquareEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, [](float f) { return f * f; }); -} - -TfLiteStatus LogicalNotEval(TfLiteContext* context, TfLiteNode* node) { - return EvalLogical(context, node, [](bool v) { return !v; }); -} - -} // namespace -} // namespace elementwise - -TfLiteRegistration Register_ABS() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::AbsEval, - 
/*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_SIN() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::SinEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_COS() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::CosEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_LOG() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::LogEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_SQRT() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::SqrtEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_RSQRT() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::RsqrtEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_SQUARE() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::SquareEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_LOGICAL_NOT() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::LogicalNotEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/ethosu.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/ethosu.cc deleted file mode 100644 index c305121e..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/ethosu.cc +++ /dev/null @@ -1,27 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
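The elementwise kernels above all funnel through one templated EvalImpl that applies a unary function to every element; only the function and the supported type differ per op. The pattern, reduced to a standalone sketch (not the kernel code itself):

#include <cmath>
#include <cstdio>
#include <vector>

// Apply a unary function to every element of `in`, writing the results to `out`.
template <typename T>
void EvalElementwise(const std::vector<T>& in, std::vector<T>& out, T (*func)(T)) {
  out.resize(in.size());
  for (size_t i = 0; i < in.size(); ++i) out[i] = func(in[i]);
}

int main() {
  const std::vector<float> in = {1.f, 4.f, 9.f};
  std::vector<float> sqrt_out, square_out;
  EvalElementwise<float>(in, sqrt_out, [](float f) { return std::sqrt(f); });
  EvalElementwise<float>(in, square_out, [](float f) { return f * f; });
  for (size_t i = 0; i < in.size(); ++i)
    std::printf("x=%.1f sqrt=%.2f square=%.1f\n", in[i], sqrt_out[i], square_out[i]);
}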
-==============================================================================*/ - -// -// This is a stub file for non-Ethos platforms -// -#include "tensorflow/lite/c/common.h" - -namespace tflite { - -TfLiteRegistration* Register_ETHOSU() { return nullptr; } - -const char* GetString_ETHOSU() { return ""; } - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/ethosu.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/ethosu.h deleted file mode 100644 index cfbb0d3f..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/ethosu.h +++ /dev/null @@ -1,28 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_ETHOSU_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_ETHOSU_H_ - -#include "tensorflow/lite/c/common.h" - -namespace tflite { - -TfLiteRegistration* Register_ETHOSU(); - -const char* GetString_ETHOSU(); - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_ETHOSU_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/flexbuffers_generated_data.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/flexbuffers_generated_data.cc deleted file mode 100644 index 106deec7..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/flexbuffers_generated_data.cc +++ /dev/null @@ -1,68 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// This file is generated. 
See: -// tensorflow/lite/micro/kernels/detection_postprocess_test/README.md - -#include "tensorflow/lite/micro/kernels/flexbuffers_generated_data.h" - -const int g_gen_data_size_none_regular_nms = 242; -const unsigned char g_gen_data_none_regular_nms[] = { - 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x00, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x00, 0x75, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x75, 0x6c, 0x61, - 0x72, 0x5f, 0x6e, 0x6d, 0x73, 0x00, 0x6e, 0x6d, 0x73, 0x5f, 0x73, 0x63, - 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x00, 0x6e, 0x6d, 0x73, 0x5f, 0x69, 0x6f, 0x75, 0x5f, 0x74, 0x68, - 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x00, 0x6e, 0x75, 0x6d, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x65, 0x73, 0x00, 0x79, 0x5f, 0x73, 0x63, - 0x61, 0x6c, 0x65, 0x00, 0x78, 0x5f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x00, - 0x68, 0x5f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x00, 0x77, 0x5f, 0x73, 0x63, - 0x61, 0x6c, 0x65, 0x00, 0x0b, 0x78, 0x12, 0x94, 0xa4, 0x43, 0x58, 0x33, - 0x6a, 0x11, 0x22, 0x2b, 0x0b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa0, 0x40, - 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0xa0, 0x40, 0x00, 0x00, 0x20, 0x41, 0x00, 0x00, 0x20, 0x41, - 0x06, 0x0e, 0x06, 0x06, 0x0e, 0x0e, 0x06, 0x6a, 0x0e, 0x0e, 0x0e, 0x37, - 0x26, 0x01, -}; -const int g_gen_data_size_regular_nms = 242; -const unsigned char g_gen_data_regular_nms[] = { - 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x00, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x00, 0x75, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x75, 0x6c, 0x61, - 0x72, 0x5f, 0x6e, 0x6d, 0x73, 0x00, 0x6e, 0x6d, 0x73, 0x5f, 0x73, 0x63, - 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x00, 0x6e, 0x6d, 0x73, 0x5f, 0x69, 0x6f, 0x75, 0x5f, 0x74, 0x68, - 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x00, 0x6e, 0x75, 0x6d, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x65, 0x73, 0x00, 0x79, 0x5f, 0x73, 0x63, - 0x61, 0x6c, 0x65, 0x00, 0x78, 0x5f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x00, - 0x68, 0x5f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x00, 0x77, 0x5f, 0x73, 0x63, - 0x61, 0x6c, 0x65, 0x00, 0x0b, 0x78, 0x12, 0x94, 0xa4, 0x43, 0x58, 0x33, - 0x6a, 0x11, 0x22, 0x2b, 0x0b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0b, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa0, 0x40, - 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, - 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0xa0, 0x40, 0x00, 0x00, 0x20, 0x41, 0x00, 0x00, 0x20, 0x41, - 0x06, 0x0e, 0x06, 0x06, 0x0e, 0x0e, 0x06, 0x6a, 0x0e, 0x0e, 0x0e, 0x37, - 0x26, 0x01, -}; diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/flexbuffers_generated_data.h 
b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/flexbuffers_generated_data.h deleted file mode 100644 index f5b9eae0..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/flexbuffers_generated_data.h +++ /dev/null @@ -1,25 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H -#define TENSORFLOW_LITE_MICRO_KERNELS_FLEXBUFFERS_GENERATED_DATA_H - -extern const int g_gen_data_size_none_regular_nms; -extern const unsigned char g_gen_data_none_regular_nms[]; - -extern const int g_gen_data_size_regular_nms; -extern const unsigned char g_gen_data_regular_nms[]; - -#endif diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/floor.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/floor.cc deleted file mode 100644 index b8be1cf0..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/floor.cc +++ /dev/null @@ -1,57 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/floor.h" - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace floor { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - reference_ops::Floor(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; -} -} // namespace floor - -TfLiteRegistration Register_FLOOR() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/floor::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/fully_connected.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/fully_connected.cc deleted file mode 100644 index d3fdeacb..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/fully_connected.cc +++ /dev/null @@ -1,253 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/kernels/fully_connected.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/fully_connected.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -struct OpData { - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - // The range of the fused activation layer. For example for kNone and - // uint8_t these would be 0 and 255. - int32_t output_activation_min; - int32_t output_activation_max; - // The index of the temporary tensor where the quantized inputs are cached. 
- int input_quantized_index; - // Cached zero point values of tensors. - int32_t input_zero_point; - int32_t filter_zero_point; - int32_t output_zero_point; -}; - -constexpr int kInputTensor = 0; -constexpr int kWeightsTensor = 1; -constexpr int kBiasTensor = 2; -constexpr int kOutputTensor = 0; - -TfLiteStatus CalculateOpData(TfLiteContext* context, - TfLiteFusedActivation activation, - TfLiteType data_type, const TfLiteTensor* input, - const TfLiteTensor* filter, - const TfLiteTensor* bias, TfLiteTensor* output, - OpData* data) { - TfLiteStatus status = kTfLiteOk; - if (data_type != kTfLiteFloat32) { - double real_multiplier = 0.0; - TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( - context, input, filter, bias, output, &real_multiplier)); - int exponent; - QuantizeMultiplier(real_multiplier, &data->output_multiplier, &exponent); - data->output_shift = -exponent; - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, activation, output, &data->output_activation_min, - &data->output_activation_max)); - - data->input_zero_point = input->params.zero_point; - data->filter_zero_point = filter->params.zero_point; - data->output_zero_point = output->params.zero_point; - } - return status; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - const auto params = - static_cast(node->builtin_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kWeightsTensor); - TF_LITE_ENSURE(context, filter != nullptr); - const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - TF_LITE_ENSURE_MSG(context, input->type == filter->type, - "Hybrid models are not supported on TFLite Micro."); - - return CalculateOpData(context, params->activation, input->type, input, - filter, bias, output, data); -} - -TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, - const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - tflite::FullyConnectedParams op_params; - op_params.input_offset = -data.input_zero_point; - op_params.weights_offset = -data.filter_zero_point; - op_params.output_offset = data.output_zero_point; - op_params.output_multiplier = data.output_multiplier; - // TODO(b/138810107): Figure out whether output shift should be inverted - op_params.output_shift = -data.output_shift; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - - reference_integer_ops::FullyConnected( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; 
-} - -TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, - const OpData& data, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - const int32_t input_offset = -data.input_zero_point; - const int32_t filter_offset = -data.filter_zero_point; - const int32_t output_offset = data.output_zero_point; - - tflite::FullyConnectedParams op_params; - op_params.input_offset = input_offset; - op_params.weights_offset = filter_offset; - op_params.output_offset = output_offset; - op_params.output_multiplier = data.output_multiplier; - // Legacy ops used mixed left and right shifts. Now all are +ve-means-left. - op_params.output_shift = -data.output_shift; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - -#define TF_LITE_FULLY_CONNECTED(output_data_type) \ - reference_ops::FullyConnected( \ - op_params, tflite::micro::GetTensorShape(input), \ - tflite::micro::GetTensorData(input), \ - tflite::micro::GetTensorShape(filter), \ - tflite::micro::GetTensorData(filter), \ - tflite::micro::GetTensorShape(bias), \ - tflite::micro::GetTensorData(bias), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output)) - switch (output->type) { - case kTfLiteUInt8: - TF_LITE_FULLY_CONNECTED(uint8_t); - break; - case kTfLiteInt16: - TF_LITE_FULLY_CONNECTED(int16_t); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); - return kTfLiteError; - } - - return kTfLiteOk; -} - -TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteFusedActivation activation, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) { - float output_activation_min, output_activation_max; - CalculateActivationRange(activation, &output_activation_min, - &output_activation_max); - tflite::FullyConnectedParams op_params; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - tflite::reference_ops::FullyConnected( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - const auto* params = - static_cast(node->builtin_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kWeightsTensor); - const TfLiteEvalTensor* bias = - tflite::micro::GetEvalInput(context, node, kBiasTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - // Checks in Prepare ensure input, output and filter types are all the same. 
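// Worked example of the fixed-point rescaling set up in CalculateOpData()
// above (illustrative numbers, not taken from a real model): with
// input_scale = 0.5, filter_scale = 0.25 and output_scale = 1.0 the real
// multiplier is 0.5 * 0.25 / 1.0 = 0.125. QuantizeMultiplier() expresses it as
// M0 * 2^exponent with M0 in [0.5, 1.0): here M0 = 0.5 (stored as the Q31
// value 1073741824) and exponent = -2. CalculateOpData() stores
// output_shift = -exponent = 2, and the eval paths negate it again, so the
// reference kernels see output_shift = -2 in their positive-means-left-shift
// convention, i.e. multiply by 0.5 in Q31 and then shift right by 2.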
- switch (input->type) { - case kTfLiteFloat32: - return EvalFloat(context, node, params->activation, input, filter, bias, - output); - case kTfLiteInt8: - return EvalQuantizedInt8(context, node, data, input, filter, bias, - output); - - case kTfLiteUInt8: - return EvalQuantized(context, node, data, input, filter, bias, output); - - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_FULLY_CONNECTED() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/fully_connected.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/fully_connected.h deleted file mode 100644 index 3e646718..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/fully_connected.h +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_ - -#include "tensorflow/lite/c/common.h" - -namespace tflite { - -// This is the most generic TfLiteRegistration. The actual supported types may -// still be target dependent. The only requirement is that every implementation -// (reference or optimized) must define this function. -TfLiteRegistration Register_FULLY_CONNECTED(); - -#if defined(CMSIS_NN) || defined(ARDUINO) -// The Arduino is a special case where we use the CMSIS kernels, but because of -// the current approach to building for Arduino, we do not support -DCMSIS_NN as -// part of the build. As a result, we use defined(ARDUINO) as proxy for the -// CMSIS kernels for this one special case. - -// Returns a TfLiteRegistration struct for cmsis-nn kernel variant that only -// supports int8. -TfLiteRegistration Register_FULLY_CONNECTED_INT8(); - -#else -// Note that while this block gets used for both reference and optimized kernels -// that do not have any specialized implementations, the only goal here is to -// define fallback implementation that allow reference kernels to still be used -// from applications that call a more specific kernel variant. 
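The comment above is the point of this registration scheme: an application can always ask for Register_FULLY_CONNECTED_INT8(), and the build decides whether that resolves to the cmsis-nn int8 kernel or falls through to the generic reference kernel via the inline definition below. A minimal usage sketch (resolver wiring omitted):

#include "tensorflow/lite/micro/kernels/fully_connected.h"

// On CMSIS_NN/ARDUINO builds this is the int8-only cmsis-nn variant; on all
// other builds the inline fallback below simply forwards to
// Register_FULLY_CONNECTED().
const TfLiteRegistration fully_connected_registration =
    tflite::Register_FULLY_CONNECTED_INT8();

The registration can then be handed to whichever op resolver the application uses.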
- -inline TfLiteRegistration Register_FULLY_CONNECTED_INT8() { - return Register_FULLY_CONNECTED(); -} - -#endif -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/hard_swish.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/hard_swish.cc deleted file mode 100644 index a0a245f8..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/hard_swish.cc +++ /dev/null @@ -1,142 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/hard_swish.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace hard_swish { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -void* HardSwishInit(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(HardSwishParams)); -} - -TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { - HardSwishParams* params = static_cast(node->user_data); - - params->input_zero_point = input->params.zero_point; - params->output_zero_point = output->params.zero_point; - - const float input_scale = input->params.scale; - const float hires_input_scale = (1.0f / 128.0f) * input_scale; - const float reluish_scale = 3.0f / 32768.0f; - const float output_scale = output->params.scale; - - const double output_multiplier = - static_cast(hires_input_scale / output_scale); - int32_t output_multiplier_fixedpoint_int32; - QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32, - ¶ms->output_multiplier_exponent); - DownScaleInt32ToInt16Multiplier( - output_multiplier_fixedpoint_int32, - ¶ms->output_multiplier_fixedpoint_int16); - - TF_LITE_ENSURE(context, 
params->output_multiplier_exponent <= 0); - - const double reluish_multiplier = - static_cast(hires_input_scale / reluish_scale); - int32_t reluish_multiplier_fixedpoint_int32; - QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32, - ¶ms->reluish_multiplier_exponent); - DownScaleInt32ToInt16Multiplier( - reluish_multiplier_fixedpoint_int32, - ¶ms->reluish_multiplier_fixedpoint_int16); - } - - return kTfLiteOk; -} - -TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - HardSwishParams* params = static_cast(node->user_data); - - switch (input->type) { - case kTfLiteFloat32: { - tflite::reference_ops::HardSwish( - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } break; - case kTfLiteUInt8: { - tflite::reference_ops::HardSwish( - *params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } break; - case kTfLiteInt8: { - tflite::reference_ops::HardSwish( - *params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } break; - default: { - TF_LITE_KERNEL_LOG( - context, - "Only float32/int8_t/uint8_t are supported currently, got %s", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - } - return kTfLiteOk; -} - -} // namespace hard_swish - -TfLiteRegistration Register_HARD_SWISH() { - return {/*init=*/hard_swish::HardSwishInit, - /*free=*/nullptr, - /*prepare=*/hard_swish::HardSwishPrepare, - /*invoke=*/hard_swish::HardSwishEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/kernel_runner.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/kernel_runner.cc deleted file mode 100644 index 5d034791..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/kernel_runner.cc +++ /dev/null @@ -1,166 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/micro/kernels/kernel_runner.h" - -namespace tflite { -namespace micro { - -namespace { -constexpr size_t kBufferAlignment = 16; -} // namespace - -// TODO(b/161841696): Consider moving away from global arena buffers: -constexpr int KernelRunner::kNumScratchBuffers_; -constexpr int KernelRunner::kKernelRunnerBufferSize_; -uint8_t KernelRunner::kKernelRunnerBuffer_[]; - -KernelRunner::KernelRunner(const TfLiteRegistration& registration, - TfLiteTensor* tensors, int tensors_size, - TfLiteIntArray* inputs, TfLiteIntArray* outputs, - void* builtin_data, ErrorReporter* error_reporter) - : allocator_(SimpleMemoryAllocator::Create( - error_reporter, kKernelRunnerBuffer_, kKernelRunnerBufferSize_)), - registration_(registration), - tensors_(tensors), - error_reporter_(error_reporter) { - // Prepare TfLiteContext: - context_.impl_ = static_cast(this); - context_.ReportError = ReportOpError; - context_.recommended_num_threads = 1; - context_.GetTensor = GetTensor; - context_.GetEvalTensor = GetEvalTensor; - context_.AllocatePersistentBuffer = AllocatePersistentBuffer; - context_.RequestScratchBufferInArena = RequestScratchBufferInArena; - context_.GetScratchBuffer = GetScratchBuffer; - - // Prepare TfLiteNode: - node_.inputs = inputs; - node_.outputs = outputs; - node_.builtin_data = builtin_data; -} - -TfLiteStatus KernelRunner::InitAndPrepare(const char* init_data, - size_t length) { - if (registration_.init) { - node_.user_data = registration_.init(&context_, init_data, length); - } - if (registration_.prepare) { - TF_LITE_ENSURE_STATUS(registration_.prepare(&context_, &node_)); - } - return kTfLiteOk; -} - -TfLiteStatus KernelRunner::Invoke() { - if (registration_.invoke == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "TfLiteRegistration missing invoke function pointer!"); - return kTfLiteError; - } - return registration_.invoke(&context_, &node_); -} - -TfLiteTensor* KernelRunner::GetTensor(const struct TfLiteContext* context, - int tensor_index) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - - return &runner->tensors_[tensor_index]; -} - -TfLiteEvalTensor* KernelRunner::GetEvalTensor( - const struct TfLiteContext* context, int tensor_index) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - - TfLiteEvalTensor* eval_tensor = - reinterpret_cast(runner->allocator_->AllocateTemp( - sizeof(TfLiteEvalTensor), alignof(TfLiteEvalTensor))); - TFLITE_DCHECK(eval_tensor != nullptr); - - // In unit tests, the TfLiteTensor pointer contains the source of truth for - // buffers and values: - eval_tensor->data = runner->tensors_[tensor_index].data; - eval_tensor->dims = runner->tensors_[tensor_index].dims; - eval_tensor->type = runner->tensors_[tensor_index].type; - return eval_tensor; -} - -void* KernelRunner::AllocatePersistentBuffer(TfLiteContext* context, - size_t bytes) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - - return runner->allocator_->AllocateFromTail(bytes, kBufferAlignment); -} - -TfLiteStatus KernelRunner::RequestScratchBufferInArena(TfLiteContext* context, - size_t bytes, - int* buffer_index) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(buffer_index != nullptr); - - KernelRunner* runner = 
reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - - if (runner->scratch_buffer_count_ == kNumScratchBuffers_) { - TF_LITE_REPORT_ERROR( - runner->error_reporter_, - "Exceeded the maximum number of scratch tensors allowed (%d).", - kNumScratchBuffers_); - return kTfLiteError; - } - - // For tests, we allocate scratch buffers from the tail and keep them around - // for the lifetime of model. This means that the arena size in the tests will - // be more than what we would have if the scratch buffers could share memory. - runner->scratch_buffers_[runner->scratch_buffer_count_] = - runner->allocator_->AllocateFromTail(bytes, kBufferAlignment); - TFLITE_DCHECK(runner->scratch_buffers_[runner->scratch_buffer_count_] != - nullptr); - - *buffer_index = runner->scratch_buffer_count_++; - return kTfLiteOk; -} - -void* KernelRunner::GetScratchBuffer(TfLiteContext* context, int buffer_index) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - - TFLITE_DCHECK(runner->scratch_buffer_count_ <= kNumScratchBuffers_); - if (buffer_index >= runner->scratch_buffer_count_) { - return nullptr; - } - return runner->scratch_buffers_[buffer_index]; -} - -void KernelRunner::ReportOpError(struct TfLiteContext* context, - const char* format, ...) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - - va_list args; - va_start(args, format); - TF_LITE_REPORT_ERROR(runner->error_reporter_, format, args); - va_end(args); -} - -} // namespace micro -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/kernel_runner.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/kernel_runner.h deleted file mode 100644 index 34a2149d..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/kernel_runner.h +++ /dev/null @@ -1,84 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_RUNNER_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_RUNNER_H_ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/micro/simple_memory_allocator.h" - -namespace tflite { -namespace micro { - -// Helper class to perform a simulated kernel (i.e. TfLiteRegistration) -// lifecycle (init, prepare, invoke). All internal allocations are handled by -// this class. Simply pass in the registration, list of required tensors, inputs -// array, outputs array, and any pre-builtin data. Calling Invoke() will -// automatically walk the kernel and outputs will be ready on the TfLiteTensor -// output provided during construction. 
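As the comment above says, KernelRunner lets a test drive a single TfLiteRegistration through init/prepare/invoke without a full interpreter. A condensed sketch in the style of the upstream kernel tests; CreateFloatTensor and IntArrayFromInts are the upstream test helpers, and their exact signatures may differ in this snapshot:

#include "tensorflow/lite/micro/kernels/kernel_runner.h"
#include "tensorflow/lite/micro/kernels/micro_ops.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/test_helpers.h"

// Runs the reference FLOOR kernel on a 2-element tensor via KernelRunner.
bool RunFloorOnce() {
  float in_data[2] = {1.7f, -2.3f};
  float out_data[2] = {};
  int dims_data[] = {1, 2};  // {rank, dim0}
  TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_data);

  TfLiteTensor tensors[2] = {
      tflite::testing::CreateFloatTensor(in_data, dims),
      tflite::testing::CreateFloatTensor(out_data, dims),
  };
  int inputs_data[] = {1, 0};   // one input: tensor index 0
  int outputs_data[] = {1, 1};  // one output: tensor index 1

  tflite::MicroErrorReporter reporter;
  tflite::micro::KernelRunner runner(
      tflite::ops::micro::Register_FLOOR(), tensors, /*tensors_size=*/2,
      tflite::testing::IntArrayFromInts(inputs_data),
      tflite::testing::IntArrayFromInts(outputs_data),
      /*builtin_data=*/nullptr, &reporter);

  // Lifecycle: init + prepare, then invoke; results land in out_data.
  return runner.InitAndPrepare() == kTfLiteOk && runner.Invoke() == kTfLiteOk &&
         out_data[0] == 1.0f && out_data[1] == -3.0f;
}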
-class KernelRunner { - public: - KernelRunner(const TfLiteRegistration& registration, TfLiteTensor* tensors, - int tensors_size, TfLiteIntArray* inputs, - TfLiteIntArray* outputs, void* builtin_data, - ErrorReporter* error_reporter); - - // Calls init and prepare on the kernel (i.e. TfLiteRegistration) struct. Any - // exceptions will be reported through the error_reporter and returned as a - // status code here. - TfLiteStatus InitAndPrepare(const char* init_data = nullptr, - size_t length = 0); - - // Calls init, prepare, and invoke on a given TfLiteRegistration pointer. - // After successful invoke, results will be available in the output tensor as - // passed into the constructor of this class. - TfLiteStatus Invoke(); - - protected: - static TfLiteTensor* GetTensor(const struct TfLiteContext* context, - int tensor_index); - static TfLiteEvalTensor* GetEvalTensor(const struct TfLiteContext* context, - int tensor_index); - static void* AllocatePersistentBuffer(TfLiteContext* context, size_t bytes); - static TfLiteStatus RequestScratchBufferInArena(TfLiteContext* context, - size_t bytes, - int* buffer_index); - static void* GetScratchBuffer(TfLiteContext* context, int buffer_index); - static void ReportOpError(struct TfLiteContext* context, const char* format, - ...); - - private: - static constexpr int kNumScratchBuffers_ = 12; - - static constexpr int kKernelRunnerBufferSize_ = 10000; - static uint8_t kKernelRunnerBuffer_[kKernelRunnerBufferSize_]; - - SimpleMemoryAllocator* allocator_ = nullptr; - const TfLiteRegistration& registration_; - TfLiteTensor* tensors_ = nullptr; - ErrorReporter* error_reporter_ = nullptr; - - TfLiteContext context_ = {}; - TfLiteNode node_ = {}; - - int scratch_buffer_count_ = 0; - uint8_t* scratch_buffers_[kNumScratchBuffers_]; -}; - -} // namespace micro -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_RUNNER_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/kernel_util.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/kernel_util.cc deleted file mode 100644 index deca92b6..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/kernel_util.cc +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -#include "tensorflow/lite/c/common.h" - -namespace tflite { -namespace micro { - -bool HaveSameShapes(const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2) { - TFLITE_DCHECK(input1 != nullptr); - TFLITE_DCHECK(input2 != nullptr); - return TfLiteIntArrayEqual(input1->dims, input2->dims); -} - -const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor) { - if (tensor == nullptr || tensor->dims == nullptr) { - return RuntimeShape(); - } - TfLiteIntArray* dims = tensor->dims; - const int dims_size = dims->size; - const int32_t* dims_data = reinterpret_cast(dims->data); - return RuntimeShape(dims_size, dims_data); -} - -} // namespace micro -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/kernel_util.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/kernel_util.h deleted file mode 100644 index 79cd58ec..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/kernel_util.h +++ /dev/null @@ -1,75 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_ - -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { -namespace micro { - -// Returns a mutable tensor for a given input index. is_variable must be checked -// during prepare when the full TfLiteTensor is available. -inline TfLiteEvalTensor* GetMutableEvalInput(const TfLiteContext* context, - const TfLiteNode* node, - int index) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(node != nullptr); - return context->GetEvalTensor(context, node->inputs->data[index]); -} - -// Returns the TfLiteEvalTensor struct for a given input index in a node. -inline const TfLiteEvalTensor* GetEvalInput(const TfLiteContext* context, - const TfLiteNode* node, int index) { - return GetMutableEvalInput(context, node, index); -} - -// Returns the TfLiteEvalTensor struct for a given output index in a node. -inline TfLiteEvalTensor* GetEvalOutput(const TfLiteContext* context, - const TfLiteNode* node, int index) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(node != nullptr); - return context->GetEvalTensor(context, node->outputs->data[index]); -} - -// Returns data for a TfLiteEvalTensor struct. -template -T* GetTensorData(TfLiteEvalTensor* tensor) { - return tensor != nullptr ? reinterpret_cast(tensor->data.raw) : nullptr; -} - -// Returns const data for a TfLiteEvalTensor struct. 
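Taken together, these helpers are what every kernel Eval in this patch is built from; a minimal float32 pattern, shown with the template arguments spelled out:

TfLiteStatus EvalCopy(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0);
  TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0);
  const float* in = tflite::micro::GetTensorData<float>(input);
  float* out = tflite::micro::GetTensorData<float>(output);
  const int flat_size = tflite::micro::GetTensorShape(input).FlatSize();
  for (int i = 0; i < flat_size; ++i) out[i] = in[i];  // identity pass-through
  return kTfLiteOk;
}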
-template -const T* GetTensorData(const TfLiteEvalTensor* tensor) { - TFLITE_DCHECK(tensor != nullptr); - return reinterpret_cast(tensor->data.raw); -} - -// Returns the shape of a TfLiteEvalTensor struct. -const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor); - -// Return true if the given tensors have the same shape. -bool HaveSameShapes(const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2); - -} // namespace micro -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_KERNEL_UTIL_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/l2norm.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/l2norm.cc deleted file mode 100644 index 401741a0..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/l2norm.cc +++ /dev/null @@ -1,157 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/portable_tensor.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h" -#include "tensorflow/lite/kernels/internal/reference/l2normalization.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace l2norm { - -namespace { - -// This file has two implementation of L2Norm. -enum KernelType { - kReference, - kGenericOptimized, -}; - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -} // namespace - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - auto* params = reinterpret_cast(node->builtin_data); - L2NormalizationParams* data = - static_cast(node->user_data); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE(context, NumDimensions(input) <= 4); - - TF_LITE_ENSURE(context, output->type == kTfLiteFloat32 || - output->type == kTfLiteUInt8 || - output->type == kTfLiteInt8); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - data->input_zero_point = input->params.zero_point; - } else if (output->type == kTfLiteFloat32) { - data->input_zero_point = 0; - } - - // TODO(ahentz): For some reason our implementations don't support - // activations. 
- TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone); - - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, - sizeof(L2NormalizationParams)); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const L2NormalizationParams& data = - *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - // TODO(b/143912164): instead of hardcode the epsilon here, we should read it - // from tensorflow, i.e., adding a params. - // We don't compute epsilon for quantized kernel: - // - // epsilon_float = (epsilon_quant - zp) * scale - // so - // espsilon_quant = epsilon_float / scale + zp - // We know epsilon_float is just a very small number to avoid division by - // zero error, and scale is > 1, so the integer value of epsilon for quant - // is just dominated by the zero point. - // Also, GetInvSqrtQuantizedMultiplierExp handles the scenario where the sum - // of input value squared is zero case well. - // So we don't even need to do handle the epsilon for quantized kernel case. - const float epsilon = 1e-6f; - if (output->type == kTfLiteFloat32) { - reference_ops::L2Normalization(data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - epsilon); - } else if (output->type == kTfLiteUInt8) { - reference_ops::L2Normalization( - data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else if (output->type == kTfLiteInt8) { - const auto input_shape = tflite::micro::GetTensorShape(input); - const auto output_shape = tflite::micro::GetTensorShape(output); - const int trailing_dim = input_shape.DimensionsCount() - 1; - const int depth = - MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); - const int outer_size = - MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); - reference_integer_ops::L2Normalization( - data.input_zero_point, outer_size, depth, - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorData(output)); - } else { - TF_LITE_KERNEL_LOG(context, "Output type is %s, requires float.", - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - - return kTfLiteOk; -} - -} // namespace l2norm - -TfLiteRegistration Register_L2NORM_REF() { - return {/*init=*/l2norm::Init, - /*free=*/nullptr, - /*prepare=*/l2norm::Prepare, - /*invoke=*/l2norm::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_L2_NORMALIZATION() { return Register_L2NORM_REF(); } - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/logical.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/logical.cc deleted file mode 100644 index f4033ba8..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/logical.cc +++ /dev/null @@ -1,105 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/reference/binary_function.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace logical { -namespace { - -// Input/output tensor index. -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; - -TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node, - bool (*func)(bool, bool)) { - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - if (tflite::micro::HaveSameShapes(input1, input2)) { - reference_ops::BinaryFunction( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), func); - } else { - reference_ops::BroadcastBinaryFunction4DSlow( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), func); - } - - return kTfLiteOk; -} - -bool LogicalOr(bool x, bool y) { return x || y; } - -TfLiteStatus LogicalOrEval(TfLiteContext* context, TfLiteNode* node) { - return LogicalImpl(context, node, LogicalOr); -} - -bool LogicalAnd(bool x, bool y) { return x && y; } - -TfLiteStatus LogicalAndEval(TfLiteContext* context, TfLiteNode* node) { - return LogicalImpl(context, node, LogicalAnd); -} - -} // namespace -} // namespace logical - -TfLiteRegistration Register_LOGICAL_OR() { - // Init, Free, Prepare, Eval are satisfying the Interface required by - // TfLiteRegistration. - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/logical::LogicalOrEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_LOGICAL_AND() { - // Init, Free, Prepare, Eval are satisfying the Interface required by - // TfLiteRegistration. 
- return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/logical::LogicalAndEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/logistic.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/logistic.cc deleted file mode 100644 index 3fa81ba8..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/logistic.cc +++ /dev/null @@ -1,150 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/logistic.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace activations { -namespace { -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -struct OpData { - int32_t input_zero_point; - int32_t input_range_radius; - int32_t input_multiplier; - int input_left_shift; -}; - -TfLiteStatus CalculateArithmeticOpData(TfLiteContext* context, TfLiteNode* node, - OpData* data) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, output->params.zero_point, - std::numeric_limits::min()); - - static constexpr int kInputIntegerBits = 4; - const double input_real_multiplier = - static_cast(input->params.scale) * - static_cast(1 << (31 - kInputIntegerBits)); - - data->input_zero_point = input->params.zero_point; - - const double q = std::frexp(input_real_multiplier, &data->input_left_shift); - data->input_multiplier = static_cast(TfLiteRound(q * (1ll << 31))); - - data->input_range_radius = - CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 31); - } - return kTfLiteOk; -} -} // namespace - -void* LogisticInit(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus LogisticPrepare(TfLiteContext* context, TfLiteNode* node) { 
- TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - return CalculateArithmeticOpData(context, node, data); -} - -TfLiteStatus LogisticEval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - if (input->type == kTfLiteFloat32) { - switch (output->type) { - case kTfLiteFloat32: { - reference_ops::Logistic(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else if (input->type == kTfLiteInt8) { - switch (output->type) { - case kTfLiteInt8: { - reference_integer_ops::Logistic( - data->input_zero_point, data->input_range_radius, - data->input_multiplier, data->input_left_shift, - NumElements(input->dims), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else { - // TODO(b/141211002): Also support other data types once we have supported - // temporary tensors in TFLM. - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace activations - -TfLiteRegistration Register_LOGISTIC() { - return {/*init=*/activations::LogisticInit, - /*free=*/nullptr, - /*prepare=*/activations::LogisticPrepare, - /*invoke=*/activations::LogisticEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/maximum_minimum.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/maximum_minimum.cc deleted file mode 100644 index a7c343bf..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/maximum_minimum.cc +++ /dev/null @@ -1,148 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/maximum_minimum.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace maximum_minimum { -namespace { - -// This file has a reference implementation of TFMaximum/TFMinimum. -enum KernelType { - kReference, -}; - -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; - -struct OpContext { - OpContext(TfLiteContext* context, TfLiteNode* node) { - input1 = tflite::micro::GetEvalInput(context, node, kInputTensor1); - input2 = tflite::micro::GetEvalInput(context, node, kInputTensor2); - output = tflite::micro::GetEvalOutput(context, node, kOutputTensor); - } - const TfLiteEvalTensor* input1; - const TfLiteEvalTensor* input2; - TfLiteEvalTensor* output; -}; - -struct MaximumOp { - template - static data_type op(data_type el1, data_type el2) { - return el1 > el2 ? el1 : el2; - } -}; - -struct MinimumOp { - template - static data_type op(data_type el1, data_type el2) { - return el1 < el2 ? el1 : el2; - } -}; - -} // namespace - -template -void TFLiteOperation(TfLiteContext* context, TfLiteNode* node, - const OpContext& op_context) { - reference_ops::MaximumMinimumBroadcastSlow( - tflite::micro::GetTensorShape(op_context.input1), - tflite::micro::GetTensorData(op_context.input1), - tflite::micro::GetTensorShape(op_context.input2), - tflite::micro::GetTensorData(op_context.input2), - tflite::micro::GetTensorShape(op_context.output), - tflite::micro::GetTensorData(op_context.output), - op_type::template op); -} - -template -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - OpContext op_context(context, node); - - if (kernel_type == kReference) { - switch (op_context.output->type) { - case kTfLiteFloat32: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteUInt8: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt8: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt32: - TFLiteOperation(context, node, op_context); - break; - case kTfLiteInt64: - TFLiteOperation(context, node, op_context); - break; - default: - TF_LITE_KERNEL_LOG(context, - "Type %s (%d) is not supported by Maximum/Minimum.", - TfLiteTypeGetName(op_context.output->type), - op_context.output->type); - return kTfLiteError; - } - } else { - TF_LITE_KERNEL_LOG(context, - "Kernel type not supported by Maximum/Minimum."); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace maximum_minimum - -TfLiteRegistration Register_MAXIMUM() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/ - maximum_minimum::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_MINIMUM() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/ - maximum_minimum::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // 
namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/micro_ops.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/micro_ops.h deleted file mode 100644 index f73b1d59..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/micro_ops.h +++ /dev/null @@ -1,101 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_ - -#include "tensorflow/lite/c/common.h" - -// Forward declaration of all micro op kernel registration methods. These -// registrations are included with the standard `BuiltinOpResolver`. -// -// This header is particularly useful in cases where only a subset of ops are -// needed. In such cases, the client can selectively add only the registrations -// their model requires, using a custom `(Micro)MutableOpResolver`. Selective -// registration in turn allows the linker to strip unused kernels. - -namespace tflite { - -// TFLM is incrementally moving towards a flat tflite namespace -// (https://abseil.io/tips/130). Any new ops (or cleanup of existing ops should -// have their Register function declarations in the tflite namespace. - -TfLiteRegistration Register_CONV_2D(); -TfLiteRegistration Register_DEPTHWISE_CONV_2D(); -TfLiteRegistration Register_QUANTIZE(); -TfLiteRegistration Register_SHAPE(); -TfLiteRegistration Register_SOFTMAX(); -TfLiteRegistration Register_SVDF(); -TfLiteRegistration Register_TRANSPOSE_CONV_2D(); - -namespace ops { -namespace micro { - -TfLiteRegistration Register_ABS(); -TfLiteRegistration Register_ADD(); -TfLiteRegistration Register_ARG_MAX(); -TfLiteRegistration Register_ARG_MIN(); -TfLiteRegistration Register_AVERAGE_POOL_2D(); -TfLiteRegistration Register_CEIL(); -// TODO(b/160234179): Change custom OPs to also return by value. 
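The header comment above describes the purpose of this file: instead of linking AllOpsResolver, an application adds only the registrations its model needs so the linker can strip the rest. A minimal sketch with the templated MicroMutableOpResolver (the exact Add* method set available in this snapshot is an assumption):

#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

// Register only the four kernels this hypothetical model uses; <4> sizes the
// resolver's internal registration table.
void RegisterSelectedOps(tflite::MicroMutableOpResolver<4>& op_resolver) {
  op_resolver.AddConv2D();
  op_resolver.AddMaxPool2D();
  op_resolver.AddFullyConnected();
  op_resolver.AddSoftmax();
}

The resolver is then passed to MicroInterpreter together with the model and the tensor arena.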
-TfLiteRegistration* Register_CIRCULAR_BUFFER(); -TfLiteRegistration Register_CONCATENATION(); -TfLiteRegistration Register_COS(); -TfLiteRegistration Register_DEQUANTIZE(); -TfLiteRegistration Register_EQUAL(); -TfLiteRegistration Register_FLOOR(); -TfLiteRegistration Register_GREATER(); -TfLiteRegistration Register_GREATER_EQUAL(); -TfLiteRegistration Register_HARD_SWISH(); -TfLiteRegistration Register_LESS(); -TfLiteRegistration Register_LESS_EQUAL(); -TfLiteRegistration Register_LOG(); -TfLiteRegistration Register_LOGICAL_AND(); -TfLiteRegistration Register_LOGICAL_NOT(); -TfLiteRegistration Register_LOGICAL_OR(); -TfLiteRegistration Register_LOGISTIC(); -TfLiteRegistration Register_MAXIMUM(); -TfLiteRegistration Register_MAX_POOL_2D(); -TfLiteRegistration Register_MEAN(); -TfLiteRegistration Register_MINIMUM(); -TfLiteRegistration Register_MUL(); -TfLiteRegistration Register_NEG(); -TfLiteRegistration Register_NOT_EQUAL(); -TfLiteRegistration Register_PACK(); -TfLiteRegistration Register_PAD(); -TfLiteRegistration Register_PADV2(); -TfLiteRegistration Register_PRELU(); -TfLiteRegistration Register_REDUCE_MAX(); -TfLiteRegistration Register_RELU(); -TfLiteRegistration Register_RELU6(); -TfLiteRegistration Register_RESHAPE(); -TfLiteRegistration Register_RESIZE_NEAREST_NEIGHBOR(); -TfLiteRegistration Register_ROUND(); -TfLiteRegistration Register_RSQRT(); -TfLiteRegistration Register_SIN(); -TfLiteRegistration Register_SPLIT(); -TfLiteRegistration Register_SPLIT_V(); -TfLiteRegistration Register_SQRT(); -TfLiteRegistration Register_SQUARE(); -TfLiteRegistration Register_STRIDED_SLICE(); -TfLiteRegistration Register_SUB(); -TfLiteRegistration Register_UNPACK(); -TfLiteRegistration Register_L2_NORMALIZATION(); -TfLiteRegistration Register_TANH(); - -} // namespace micro -} // namespace ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_OPS_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/micro_utils.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/micro_utils.h deleted file mode 100644 index e406ac12..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/micro_utils.h +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ -namespace tflite { -namespace ops { -namespace micro { - -// Same as gtl::Greater but defined here to reduce dependencies and -// binary size for micro environment. 
-struct Greater { - template - bool operator()(const T& x, const T& y) const { - return x > y; - } -}; - -struct Less { - template - bool operator()(const T& x, const T& y) const { - return x < y; - } -}; - -} // namespace micro -} // namespace ops -} // namespace tflite -#endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/mul.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/mul.cc deleted file mode 100644 index b3f3bd4f..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/mul.cc +++ /dev/null @@ -1,236 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/mul.h" - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/mul.h" -#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/memory_helpers.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace mul { -namespace { - -constexpr int kInput1Tensor = 0; -constexpr int kInput2Tensor = 1; -constexpr int kOutputTensor = 0; - -struct OpData { - int32_t input1_zero_point; - int32_t input2_zero_point; - - int32_t output_activation_min; - int32_t output_activation_max; - int32_t output_zero_point; - int32_t output_multiplier; - int output_shift; - - float output_activation_min_f32; - float output_activation_max_f32; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - TfLiteMulParams* params, OpData* data) { - const TfLiteTensor* input1 = GetInput(context, node, kInput1Tensor); - TF_LITE_ENSURE(context, input1 != nullptr); - const TfLiteTensor* input2 = GetInput(context, node, kInput2Tensor); - TF_LITE_ENSURE(context, input2 != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); - - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->output_activation_min, - &data->output_activation_max)); - - double real_multiplier = static_cast(input1->params.scale) * - static_cast(input2->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(real_multiplier, &data->output_multiplier, - &data->output_shift); - - data->input1_zero_point = 
input1->params.zero_point; - data->input2_zero_point = input2->params.zero_point; - data->output_zero_point = output->params.zero_point; - } else { - CalculateActivationRange(params->activation, - &data->output_activation_min_f32, - &data->output_activation_max_f32); - } - - return kTfLiteOk; -} - -} // namespace - -void EvalQuantized(TfLiteContext* context, TfLiteNode* node, const OpData* data, - const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { - tflite::ArithmeticParams op_params = {}; - op_params.quantized_activation_min = data->output_activation_min; - op_params.quantized_activation_max = data->output_activation_max; - op_params.float_activation_max = data->output_activation_max_f32; - op_params.input1_offset = -data->input1_zero_point; - op_params.input2_offset = -data->input2_zero_point; - op_params.output_offset = data->output_zero_point; - op_params.output_multiplier = data->output_multiplier; - op_params.output_shift = data->output_shift; - - bool need_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); - - if (output->type == kTfLiteInt8) { - if (need_broadcast) { - reference_integer_ops::BroadcastMul4DSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_integer_ops::Mul(op_params, - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } else if (output->type == kTfLiteUInt8) { - if (need_broadcast) { - reference_integer_ops::BroadcastMul4DSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_integer_ops::Mul(op_params, - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } -} - -void EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteMulParams* params, const OpData* data, - const TfLiteEvalTensor* input1, const TfLiteEvalTensor* input2, - TfLiteEvalTensor* output) { - tflite::ArithmeticParams op_params = {}; - op_params.float_activation_min = data->output_activation_min_f32; - op_params.float_activation_max = data->output_activation_max_f32; - - bool need_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); - - if (need_broadcast) { - reference_ops::BroadcastMul4DSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Mul(op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - 
tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - return CalculateOpData(context, node, params, data); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInput1Tensor); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInput2Tensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (input1->type) { - case kTfLiteUInt8: - case kTfLiteInt8: - EvalQuantized(context, node, data, input1, input2, output); - break; - case kTfLiteFloat32: - EvalFloat(context, node, params, data, input1, input2, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - - return kTfLiteOk; -} -} // namespace mul - -TfLiteRegistration Register_MUL() { - return {/*init=*/mul::Init, - /*free=*/nullptr, - /*prepare=*/mul::Prepare, - /*invoke=*/mul::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/neg.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/neg.cc deleted file mode 100644 index 74a95ca3..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/neg.cc +++ /dev/null @@ -1,66 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/neg.h" - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace neg { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - switch (input->type) { - // TODO(wangtz): handle for kTfLiteInt8 - case kTfLiteFloat32: - reference_ops::Negate(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace neg - -TfLiteRegistration Register_NEG() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/neg::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/pack.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/pack.cc deleted file mode 100644 index d332fc63..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/pack.cc +++ /dev/null @@ -1,127 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace pack { -namespace { - -constexpr int kOutputTensor = 0; - -template -TfLiteStatus PackImpl(TfLiteContext* context, TfLiteNode* node, - TfLiteEvalTensor* output, int values_count, int axis) { - const TfLiteEvalTensor* input0 = - tflite::micro::GetEvalInput(context, node, 0); - - const int dimensions = output->dims->size; - const TfLiteIntArray* input_dims = input0->dims; - const TfLiteIntArray* output_dims = output->dims; - - if (axis < 0) { - axis += dimensions; - } - - int outer_size = 1; - for (int i = 0; i < axis; ++i) { - outer_size *= output_dims->data[i]; - } - int copy_size = 1; - for (int i = axis + 1; i < dimensions; ++i) { - copy_size *= output_dims->data[i]; - } - int input_size = 1; - for (int i = 0; i < input_dims->size; ++i) { - input_size *= input_dims->data[i]; - } - TFLITE_DCHECK_EQ(input_size, copy_size * outer_size); - - T* output_data = tflite::micro::GetTensorData(output); - - for (int i = 0; i < values_count; ++i) { - const TfLiteEvalTensor* t = tflite::micro::GetEvalInput(context, node, i); - const T* input_data = tflite::micro::GetTensorData(t); - for (int k = 0; k < outer_size; ++k) { - const T* input_ptr = input_data + copy_size * k; - int loc = k * values_count * copy_size + i * copy_size; - T* output_ptr = output_data + loc; - for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j]; - } - } - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLitePackParams* data = - reinterpret_cast(node->builtin_data); - - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (output->type) { - case kTfLiteFloat32: { - return PackImpl(context, node, output, data->values_count, - data->axis); - } - case kTfLiteUInt8: { - return PackImpl(context, node, output, data->values_count, - data->axis); - } - case kTfLiteInt8: { - return PackImpl(context, node, output, data->values_count, - data->axis); - } - case kTfLiteInt32: { - return PackImpl(context, node, output, data->values_count, - data->axis); - } - case kTfLiteInt64: { - return PackImpl(context, node, output, data->values_count, - data->axis); - } - default: { - TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by pack.", - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } - - return kTfLiteOk; -} - -} // namespace -} // namespace pack - -TfLiteRegistration Register_PACK() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/pack::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/pad.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/pad.cc deleted file mode 100644 index 5d9d4364..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/pad.cc +++ /dev/null @@ -1,254 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/pad.h" - -#include - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/portable_tensor.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace pad { -namespace { - -struct OpData { - PadParams params; - int32_t output_zero_point; -}; - -} // namespace - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - const TfLiteTensor* input = GetInput(context, node, /*index=*/0); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* paddings = GetInput(context, node, /*index=*/1); - TF_LITE_ENSURE(context, paddings != nullptr); - const TfLiteTensor* constant_values = - NumInputs(node) == 3 ? GetInput(context, node, /*index=*/2) : nullptr; - TfLiteTensor* output = GetOutput(context, node, /*index=*/0); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_EQ(context, input->type, output->type); - - // Current implementations rely on the inputs being <= 4D. - TF_LITE_ENSURE(context, NumDimensions(input) <= - reference_ops::PadKernelMaxDimensionCount()); - - if (constant_values != nullptr) { - TF_LITE_ENSURE_EQ(context, input->type, constant_values->type); - // Ensure that constant_values is a scalar. - TF_LITE_ENSURE_EQ(context, NumElements(constant_values), 1); - } - - // There must be a pair of paddings for each output dimension. - TF_LITE_ENSURE_EQ(context, GetTensorShape(paddings).FlatSize(), - output->dims->size * 2); - - // On Micro, outputs must be properly sized by the converter. 
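// [annotation] Worked example of the size check that follows: for an NHWC input
// of shape {1, 4, 4, 1} with paddings {{0,0},{1,1},{2,2},{0,0}} (stored flat as
// 0,0,1,1,2,2,0,0), each output dimension must equal
// input_dim + pad_before + pad_after, so the converter must have sized the
// output as {1, 4+1+1, 4+2+2, 1} = {1, 6, 8, 1}.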
- // NOTE: This data is only available because the paddings buffer is stored in - // the flatbuffer: - TF_LITE_ENSURE(context, IsConstantTensor(paddings)); - const int32_t* paddings_data = GetTensorData(paddings); - for (int i = 0; i < output->dims->size; i++) { - int output_dim = output->dims->data[i]; - int expected_dim = - input->dims->data[i] + paddings_data[i * 2] + paddings_data[i * 2 + 1]; - TF_LITE_ENSURE_EQ(context, output_dim, expected_dim); - } - - // Calculate OpData: - data->params.resizing_category = ResizingCategory::kGenericResize; - const int paddings_total = GetTensorShape(paddings).FlatSize(); - if (paddings_total == 8 && (paddings_data[0] == 0 && paddings_data[1] == 0) && - (paddings_data[6] == 0 && paddings_data[7] == 0)) { - data->params.resizing_category = ResizingCategory::kImageStyle; - } - - const int num_input_dimensions = NumDimensions(input); - data->params.left_padding_count = num_input_dimensions; - data->params.right_padding_count = num_input_dimensions; - - for (int idx = num_input_dimensions - 1; idx >= 0; --idx) { - data->params.left_padding[idx] = paddings_data[idx * 2]; - data->params.right_padding[idx] = paddings_data[idx * 2 + 1]; - } - - if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { - if (constant_values == nullptr) { - // Quantized Pad requires that 0 is represented in the quantized - // range. - if (input->type == kTfLiteUInt8) { - TF_LITE_ENSURE(context, output->params.zero_point >= - std::numeric_limits::min()); - TF_LITE_ENSURE(context, output->params.zero_point <= - std::numeric_limits::max()); - } else { - TF_LITE_ENSURE(context, output->params.zero_point >= - std::numeric_limits::min()); - TF_LITE_ENSURE(context, output->params.zero_point <= - std::numeric_limits::max()); - } - } else { - // Quantized Pad requires that 'constant_values' is represented in the - // same quantized range as the input and output tensors. - TF_LITE_ENSURE_EQ(context, output->params.zero_point, - constant_values->params.zero_point); - TF_LITE_ENSURE_EQ(context, static_cast(output->params.scale), - static_cast(constant_values->params.scale)); - } - data->output_zero_point = output->params.zero_point; - } - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, /*index=*/0); - const TfLiteEvalTensor* constant_values = - NumInputs(node) == 3 - ? tflite::micro::GetEvalInput(context, node, /*index=*/2) - : nullptr; - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, /*index=*/0); - - switch (input->type) { - case kTfLiteFloat32: { - float pad_value = - constant_values == nullptr - ? 
0.f - : *tflite::micro::GetTensorData(constant_values); - if (data->params.resizing_category == ResizingCategory::kImageStyle) { - reference_ops::PadImageStyle( - data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), &pad_value, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } break; - case kTfLiteUInt8: { - uint8_t pad_value; - if (constant_values == nullptr) { - pad_value = static_cast(data->output_zero_point); - } else { - pad_value = *tflite::micro::GetTensorData(constant_values); - } - if (data->params.resizing_category == ResizingCategory::kImageStyle) { - reference_ops::PadImageStyle( - data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), &pad_value, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } break; - case kTfLiteInt8: { - int8_t pad_value; - if (constant_values == nullptr) { - pad_value = static_cast(data->output_zero_point); - } else { - pad_value = *tflite::micro::GetTensorData(constant_values); - } - if (data->params.resizing_category == ResizingCategory::kImageStyle) { - reference_ops::PadImageStyle( - data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), &pad_value, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } break; - case kTfLiteInt32: { - int32_t pad_value = - constant_values == nullptr - ? 0 - : *tflite::micro::GetTensorData(constant_values); - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } break; - default: - - TF_LITE_KERNEL_LOG(context, "Type %s not currently supported by Pad.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } -#undef TF_LITE_PAD - return kTfLiteOk; -} - -} // namespace pad - -TfLiteRegistration Register_PAD() { - return {/*init=*/pad::Init, - /*free=*/nullptr, - /*prepare=*/pad::Prepare, - /*invoke=*/pad::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -// Also register Pad as PadV2. 
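// [annotation] PAD and PADV2 share this kernel: Prepare()/Eval() above already
// branch on NumInputs(node) == 3, so when the optional constant_values tensor is
// present (PADV2) it supplies the fill value, and when it is absent (PAD) the
// fill value falls back to 0 for float/int32 or to the output zero point for
// quantized types.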
-TfLiteRegistration Register_PADV2() { - return {/*init=*/pad::Init, - /*free=*/nullptr, - /*prepare=*/pad::Prepare, - /*invoke=*/pad::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/pooling.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/pooling.cc deleted file mode 100644 index 64aef0e1..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/pooling.cc +++ /dev/null @@ -1,269 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/pooling.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/padding.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace pooling { - -namespace { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -struct OpData { - TfLitePaddingValues padding; - int32_t activation_min; - int32_t activation_max; - float activation_min_f32; - float activation_max_f32; -}; - -TfLiteStatus CalculateOpData(const TfLiteContext* context, - const TfLitePoolParams* params, - const TfLiteTensor* input, - const TfLiteTensor* output, OpData* data) { - // input: batch, height, width, channel - int height = SizeOfDimension(input, 1); - int width = SizeOfDimension(input, 2); - - int out_height, out_width; - - data->padding = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, - /*dilation_rate_height=*/1, - /*dilation_rate_width=*/1, height, width, params->filter_height, - params->filter_width, params->padding, &out_height, &out_width); - - return kTfLiteOk; -} - -void AverageEvalFloat(const TfLiteContext* context, const TfLiteNode* node, - const TfLitePoolParams* params, const OpData* data, - const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { - PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data->padding.height; - op_params.padding_values.width = data->padding.width; - op_params.float_activation_min = data->activation_min_f32; - op_params.float_activation_max = data->activation_max_f32; - reference_ops::AveragePool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - 
tflite::micro::GetTensorData(output)); -} - -void AverageEvalQuantized(TfLiteContext* context, const TfLiteNode* node, - const TfLitePoolParams* params, const OpData* data, - const TfLiteEvalTensor* input, - TfLiteEvalTensor* output) { - TFLITE_DCHECK(input->type == kTfLiteUInt8 || input->type == kTfLiteInt8); - - PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data->padding.height; - op_params.padding_values.width = data->padding.width; - op_params.quantized_activation_min = data->activation_min; - op_params.quantized_activation_max = data->activation_max; - - if (input->type == kTfLiteUInt8) { - reference_ops::AveragePool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_integer_ops::AveragePool( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} - -void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, const OpData* data, - const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { - tflite::PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data->padding.height; - op_params.padding_values.width = data->padding.width; - op_params.float_activation_min = data->activation_min_f32; - op_params.float_activation_max = data->activation_max_f32; - reference_ops::MaxPool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, const OpData* data, - const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { - tflite::PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data->padding.height; - op_params.padding_values.width = data->padding.width; - op_params.quantized_activation_min = data->activation_min; - op_params.quantized_activation_max = data->activation_max; - - if (input->type == kTfLiteUInt8) { - reference_ops::MaxPool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_integer_ops::MaxPool( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} -} // namespace - -TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, 
kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - // Inputs and outputs share the same type, guaranteed by the converter. - switch (input->type) { - case kTfLiteFloat32: - AverageEvalFloat(context, node, params, data, input, output); - break; - case kTfLiteUInt8: - case kTfLiteInt8: - AverageEvalQuantized(context, node, params, data, input, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Input type %s is not currently supported", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (input->type) { - case kTfLiteFloat32: - MaxEvalFloat(context, node, params, data, input, output); - break; - case kTfLiteUInt8: - case kTfLiteInt8: - MaxEvalQuantized(context, node, params, data, input, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_STATUS(CalculateOpData(context, params, input, output, data)); - - if (input->type == kTfLiteFloat32) { - CalculateActivationRange(params->activation, &data->activation_min_f32, - &data->activation_max_f32); - } else if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { - CalculateActivationRangeQuantized(context, params->activation, output, - &data->activation_min, - &data->activation_max); - } - - return kTfLiteOk; -} - -} // namespace pooling - -TfLiteRegistration Register_AVERAGE_POOL_2D() { - return {/*init=*/pooling::Init, - /*free=*/nullptr, - /*prepare=*/pooling::Prepare, - /*invoke=*/pooling::AverageEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_MAX_POOL_2D() { - return {/*init=*/pooling::Init, - /*free=*/nullptr, - /*prepare=*/pooling::Prepare, - /*invoke=*/pooling::MaxEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/prelu.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/prelu.cc deleted file mode 100644 index b48491d6..00000000 --- 
a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/prelu.cc +++ /dev/null @@ -1,169 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/prelu.h" - -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace activations { -namespace { - -TfLiteStatus CalculatePreluParams(const TfLiteTensor* input, - const TfLiteTensor* alpha, - TfLiteTensor* output, PreluParams* params) { - if (output->type == kTfLiteInt8 || output->type == kTfLiteUInt8 || - output->type == kTfLiteInt16) { - double real_multiplier_1 = static_cast(input->params.scale) / - static_cast(output->params.scale); - double real_multiplier_2 = static_cast(input->params.scale) * - static_cast(alpha->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(real_multiplier_1, ¶ms->output_multiplier_1, - ¶ms->output_shift_1); - QuantizeMultiplier(real_multiplier_2, ¶ms->output_multiplier_2, - ¶ms->output_shift_2); - - params->input_offset = -input->params.zero_point; - params->alpha_offset = -alpha->params.zero_point; - params->output_offset = output->params.zero_point; - } - - return kTfLiteOk; -} - -} // namespace - -inline void BroadcastPrelu4DSlowFloat( - const RuntimeShape& unextended_input1_shape, const float* input1_data, - const RuntimeShape& unextended_input2_shape, const float* input2_data, - const RuntimeShape& unextended_output_shape, float* output_data) { - TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); - TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); - const RuntimeShape output_shape = - RuntimeShape::ExtendedShape(4, unextended_output_shape); - - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, - unextended_input2_shape, &desc1, &desc2); - - for (int b = 0; b < output_shape.Dims(0); ++b) { - for (int y = 0; y < output_shape.Dims(1); ++y) { - for (int x = 0; x < output_shape.Dims(2); ++x) { - for (int c = 0; c < output_shape.Dims(3); ++c) { - auto out_idx = Offset(output_shape, b, y, x, c); - auto in1_idx = SubscriptToIndex(desc1, b, y, x, c); - auto in2_idx = SubscriptToIndex(desc2, b, y, x, c); - auto in1_val = input1_data[in1_idx]; - auto in2_val = input2_data[in2_idx]; - output_data[out_idx] = in1_val >= 0.0f ? 
in1_val : in1_val * in2_val; - } - } - } - } -} - -void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(PreluParams)); -} - -TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - PreluParams* params = static_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, 0); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* alpha = GetInput(context, node, 1); - TF_LITE_ENSURE(context, alpha != nullptr); - TfLiteTensor* output = GetOutput(context, node, 0); - TF_LITE_ENSURE(context, output != nullptr); - - return CalculatePreluParams(input, alpha, output, params); -} - -TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const PreluParams& params = - *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* alpha = tflite::micro::GetEvalInput(context, node, 1); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - - switch (input->type) { - case kTfLiteFloat32: { - BroadcastPrelu4DSlowFloat(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(alpha), - tflite::micro::GetTensorData(alpha), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } break; - case kTfLiteUInt8: { - reference_ops::BroadcastPrelu4DSlow( - params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(alpha), - tflite::micro::GetTensorData(alpha), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } break; - case kTfLiteInt8: { - reference_ops::BroadcastPrelu4DSlow( - params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(alpha), - tflite::micro::GetTensorData(alpha), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } break; - default: - TF_LITE_KERNEL_LOG( - context, "Only float32 and uint8_t are supported currently, got %d.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } -} - -} // namespace activations - -TfLiteRegistration Register_PRELU() { - return {/*init=*/activations::PreluInit, - /*free=*/nullptr, - /*prepare=*/activations::PreluPrepare, - /*invoke=*/activations::PreluEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/quantize.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/quantize.cc deleted file mode 100644 index f62addbb..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/quantize.cc +++ /dev/null @@ -1,95 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/kernels/quantize.h" - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace { - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, - sizeof(OpDataQuantizeReference)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - auto* data = static_cast(node->user_data); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - const TfLiteTensor* input = GetInput(context, node, 0); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, 0); - TF_LITE_ENSURE(context, output != nullptr); - - // TODO(b/128934713): Add support for fixed-point per-channel quantization. - // Currently this only support affine per-layer quantization. - TF_LITE_ENSURE_EQ(context, output->quantization.type, - kTfLiteAffineQuantization); - const auto* affine_quantization = - reinterpret_cast(output->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - TF_LITE_ENSURE(context, affine_quantization->scale->size == 1); - - TF_LITE_ENSURE(context, input->type == kTfLiteFloat32 || - input->type == kTfLiteInt16 || - input->type == kTfLiteInt8); - TF_LITE_ENSURE(context, output->type == kTfLiteUInt8 || - output->type == kTfLiteInt8 || - output->type == kTfLiteInt16 || - output->type == kTfLiteInt32); - - if ((input->type == kTfLiteInt16 && output->type == kTfLiteInt8) || - (input->type == kTfLiteInt8 && output->type == kTfLiteInt8) || - (input->type == kTfLiteInt16 && output->type == kTfLiteInt16) || - (input->type == kTfLiteInt16 && output->type == kTfLiteInt32)) { - double effective_scale = static_cast(input->params.scale) / - static_cast(output->params.scale); - - QuantizeMultiplier(effective_scale, &data->requantize_output_multiplier, - &data->requantize_output_shift); - } - - data->quantization_params.zero_point = output->params.zero_point; - data->quantization_params.scale = static_cast(output->params.scale); - - data->input_zero_point = input->params.zero_point; - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_QUANTIZE() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/EvalQuantizeReference, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/quantize.h 
b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/quantize.h deleted file mode 100644 index aefe6244..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/quantize.h +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_QUANTIZE_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_QUANTIZE_H_ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/types.h" - -namespace tflite { - -struct OpDataQuantizeReference { - tflite::QuantizationParams quantization_params; - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t requantize_output_multiplier; - int requantize_output_shift; - - int32_t input_zero_point; -}; - -TfLiteStatus EvalQuantizeReference(TfLiteContext* context, TfLiteNode* node); - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_QUANTIZE_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/quantize_common.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/quantize_common.cc deleted file mode 100644 index 2c4a8d2c..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/quantize_common.cc +++ /dev/null @@ -1,122 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/quantize.h" -#include "tensorflow/lite/kernels/internal/reference/requantize.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/quantize.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { - -TfLiteStatus EvalQuantizeReference(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - auto* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - - if (input->type == kTfLiteFloat32) { - switch (output->type) { - case kTfLiteInt8: - reference_ops::AffineQuantize( - data->quantization_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - case kTfLiteUInt8: - reference_ops::AffineQuantize( - data->quantization_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - case kTfLiteInt16: - reference_ops::AffineQuantize( - data->quantization_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else if (input->type == kTfLiteInt16) { - size_t size = ElementCount(*input->dims); - switch (output->type) { - case kTfLiteInt8: - reference_ops::Requantize( - tflite::micro::GetTensorData(input), size, - data->requantize_output_multiplier, data->requantize_output_shift, - data->input_zero_point, data->quantization_params.zero_point, - tflite::micro::GetTensorData(output)); - break; - case kTfLiteInt16: - reference_ops::Requantize( - tflite::micro::GetTensorData(input), size, - data->requantize_output_multiplier, data->requantize_output_shift, - data->input_zero_point, data->quantization_params.zero_point, - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - case kTfLiteInt32: - reference_ops::Requantize( - tflite::micro::GetTensorData(input), size, - data->requantize_output_multiplier, data->requantize_output_shift, - data->input_zero_point, data->quantization_params.zero_point, - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else if (input->type == kTfLiteInt8) { - // Int8 to Int8 requantization, required if the input and output tensors - // have different scales and/or zero points. 
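// [annotation] Worked example of the requantization below: with
// input_scale = 0.5, input_zero_point = -1, output_scale = 0.25,
// output_zero_point = 3, Prepare() stored effective_scale = 0.5 / 0.25 = 2.0 as
// a fixed-point multiplier/shift pair. An input value q_in = 7 (real value
// 0.5 * (7 - (-1)) = 4.0) maps to q_out = 3 + round(2.0 * (7 - (-1))) = 19,
// clamped to the int8 range [-128, 127].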
- size_t size = ElementCount(*input->dims); - switch (output->type) { - case kTfLiteInt8: - reference_ops::Requantize( - tflite::micro::GetTensorData(input), size, - data->requantize_output_multiplier, data->requantize_output_shift, - data->input_zero_point, data->quantization_params.zero_point, - tflite::micro::GetTensorData(output)); - break; - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - } else { - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - - return kTfLiteOk; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/reduce.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/reduce.cc deleted file mode 100644 index 8c60269c..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/reduce.cc +++ /dev/null @@ -1,342 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/reduce.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/mean.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace reduce { - -constexpr int kMaxNumberOfAxis = 4; -constexpr int kMaxNumberOfReducedAxis = 2; - -struct OpData { - int32_t multiplier; - int shift; - int temp_buffer_idx; - int resolved_axis_idx; - int input_zp; - float input_scale; - int output_zp; - float output_scale; - int num_output_elements; -}; - -void* InitReduce(TfLiteContext* context, const char* buffer, size_t length) { - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) { - // Inputs Tensor (dtype depends on quantization): - // [0] = Input - // [1] = Axis - const TfLiteTensor* input = GetInput(context, node, 0); - - // Outputs Tensor (dtype depends on quantization): - // [0] = Output - - // Validate number of inputs and outputs - TF_LITE_ENSURE_EQ(context, node->inputs->size, 2); - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - - // Validate axis type - const TfLiteTensor* axis = GetInput(context, node, 1); - TF_LITE_ENSURE(context, axis != nullptr); - TF_LITE_ENSURE_TYPES_EQ(context, axis->type, kTfLiteInt32); - - if 
(input->type == kTfLiteInt8) { - OpData* data = static_cast(node->user_data); - const TfLiteTensor* output = GetOutput(context, node, 0); - const double real_multiplier = static_cast(input->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(real_multiplier, &data->multiplier, &data->shift); - } - - return kTfLiteOk; -} - -TfLiteStatus PrepareMax(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_OK(context, PrepareSimple(context, node)); - - OpData* op_data = static_cast(node->user_data); - const TfLiteTensor* input = GetInput(context, node, 0); - const TfLiteTensor* output = GetOutput(context, node, 0); - const TfLiteTensor* axis = GetInput(context, node, 1); - - op_data->input_scale = input->params.scale; - op_data->output_scale = output->params.scale; - op_data->num_output_elements = NumElements(output); - - context->RequestScratchBufferInArena(context, sizeof(int) * input->dims->size, - &op_data->temp_buffer_idx); - context->RequestScratchBufferInArena( - context, sizeof(int) * static_cast(ElementCount(*axis->dims)), - &op_data->resolved_axis_idx); - - return kTfLiteOk; -} - -TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, 0); - OpData* op_data = reinterpret_cast(node->user_data); - const TfLiteTensor* output = GetOutput(context, node, 0); - if (input->type == kTfLiteInt8) { - const double real_multiplier = static_cast(input->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(real_multiplier, &op_data->multiplier, &op_data->shift); - } - - int output_size = NumElements(output); - if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { - context->RequestScratchBufferInArena(context, output_size * sizeof(int32_t), - &op_data->temp_buffer_idx); - op_data->input_zp = input->params.zero_point; - op_data->input_scale = input->params.scale; - op_data->output_zp = output->params.zero_point; - op_data->output_scale = output->params.scale; - } - - TF_LITE_ENSURE_OK(context, PrepareSimple(context, node)); - // TODO(b/144955155): Support uint8_t(b/144955155) and int8_t(b/144955018) - return kTfLiteOk; -} - -void ResolveAxis(const int* axis_data, int axis_count, - tflite::MeanParams* op_params) { - int i = 0; - for (; i < axis_count; ++i) { - op_params->axis[i] = static_cast(axis_data[i]); - } - for (; i < 4; ++i) { - op_params->axis[i] = 1; - } - op_params->axis_count = axis_count; -} - -TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - TfLiteReducerParams* params = - reinterpret_cast(node->builtin_data); - OpData* op_data = reinterpret_cast(node->user_data); - - int num_axis = static_cast(ElementCount(*axis->dims)); - int temp_index[kMaxNumberOfAxis]; - int resolved_axis[kMaxNumberOfReducedAxis]; - - tflite::MeanParams op_params; - ResolveAxis(tflite::micro::GetTensorData(axis), num_axis, &op_params); - - // Special case mean implementation exists for 4D mean across axes 1 and 2. 
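// [annotation] Example of the special case tested below: an NHWC input of shape
// {1, 2, 2, 3} reduced over axes {1, 2} with keep_dims = true averages the
// 2 * 2 = 4 spatial values per channel and produces an output of shape
// {1, 1, 1, 3}, which is exactly the layout the optimized reference Mean handles.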
- bool special_case_4d_axes_1_and_2 = - input->dims->size == 4 && op_params.axis_count == 2 && - ((op_params.axis[0] == 1 && op_params.axis[1] == 2) || - (op_params.axis[0] == 2 && op_params.axis[1] == 1)); - - switch (input->type) { - case kTfLiteFloat32: { - // Defer to specialized implementation for 4D Mean across axes 1 & 2. - if (params->keep_dims && special_case_4d_axes_1_and_2) { - reference_ops::Mean(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - TF_LITE_ENSURE( - context, - reference_ops::Mean( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_index, resolved_axis, - tflite::micro::GetTensorData(output))); - } - } break; - case kTfLiteInt8: { - // Defer to specialized implementation for 4D Mean across axes 1 & 2. - if (params->keep_dims && special_case_4d_axes_1_and_2) { - reference_integer_ops::Mean( - op_params, op_data->multiplier, op_data->shift, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), op_data->input_zp, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), op_data->output_zp); - } else if (op_data->input_zp == op_data->output_zp && - op_data->input_scale == op_data->output_scale) { - int32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::Mean( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_index, resolved_axis, temp_buffer)); - } else { - int32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::QuantizedMeanOrSum( - tflite::micro::GetTensorData(input), op_data->input_zp, - op_data->input_scale, input->dims->data, input->dims->size, - tflite::micro::GetTensorData(output), - op_data->output_zp, op_data->output_scale, output->dims->data, - output->dims->size, tflite::micro::GetTensorData(axis), - num_axis, params->keep_dims, temp_index, resolved_axis, - temp_buffer, false)); - } - } break; - case kTfLiteUInt8: { - // Defer to specialized implementation for 4D Mean across axes 1 & 2. 
- if (params->keep_dims && special_case_4d_axes_1_and_2) { - reference_ops::Mean(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - op_data->input_zp, op_data->input_scale, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - op_data->output_zp, op_data->output_scale); - } else if (op_data->input_zp == op_data->output_zp && - op_data->input_scale == op_data->output_scale) { - uint32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::Mean(tflite::micro::GetTensorData(input), - input->dims->data, input->dims->size, - tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), - num_axis, params->keep_dims, temp_index, - resolved_axis, temp_buffer)); - } else { - uint32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::QuantizedMeanOrSum( - tflite::micro::GetTensorData(input), op_data->input_zp, - op_data->input_scale, input->dims->data, input->dims->size, - tflite::micro::GetTensorData(output), - op_data->output_zp, op_data->output_scale, output->dims->data, - output->dims->size, tflite::micro::GetTensorData(axis), - num_axis, params->keep_dims, temp_index, resolved_axis, - temp_buffer, false)); - } - } break; - default: - TF_LITE_ENSURE_MSG(context, false, - "Currently, only float32, int8 or uint8 input type " - "is supported."); - } - return kTfLiteOk; -} - -TfLiteStatus EvalMax(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - TfLiteReducerParams* params = - static_cast(node->builtin_data); - OpData* op_data = static_cast(node->user_data); - - // Interpret an axis tensor with null dimensions as a scalar - int num_axis = static_cast(ElementCount(*axis->dims)); - int* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - int* resolved_axis = static_cast( - context->GetScratchBuffer(context, op_data->resolved_axis_idx)); - switch (input->type) { - case kTfLiteFloat32: - TF_LITE_ENSURE( - context, - reference_ops::ReduceGeneric( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_buffer, resolved_axis, - std::numeric_limits::lowest(), - [](const float current, const float in) -> float { - return (in > current) ? in : current; - })); - break; - case kTfLiteInt8: - TF_LITE_ENSURE_EQ(context, static_cast(op_data->input_scale), - static_cast(op_data->output_scale)); - TF_LITE_ENSURE_EQ(context, op_data->input_zp, op_data->output_zp); - TF_LITE_ENSURE( - context, - reference_ops::ReduceGeneric( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_buffer, resolved_axis, - std::numeric_limits::lowest(), - [](const int8_t current, const int8_t in) -> int8_t { - return (in > current) ? 
in : current; - })); - break; - default: - TF_LITE_KERNEL_LOG(context, - "Only float32 and int8 types are supported.\n"); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace reduce - -TfLiteRegistration Register_MEAN() { - return {/*init=*/reduce::InitReduce, - /*free=*/nullptr, - /*prepare=*/reduce::PrepareMeanOrSum, - /*invoke=*/reduce::EvalMean, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_REDUCE_MAX() { - return {/*init=*/reduce::InitReduce, - /*free=*/nullptr, - /*prepare=*/reduce::PrepareMax, - /*invoke=*/reduce::EvalMax, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/reshape.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/reshape.cc deleted file mode 100644 index 8e47e2a0..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/reshape.cc +++ /dev/null @@ -1,118 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/memory_helpers.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace reshape { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -TfLiteStatus ReshapeOutput(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - // Tensorflow's Reshape allows one of the shape components to have the - // special -1 value, meaning it will be calculated automatically based on the - // input. Here we calculate what that dimension should be so that the number - // of output elements in the same as the number of input elements. - int num_input_elements = NumElements(input); - TfLiteIntArray* output_shape = output->dims; - - if (NumInputs(node) == 1 && // Legacy scalar supported with params. - output_shape->size == 1 && output_shape->data[0] == 0) { - // Legacy tflite models use a shape parameter of [0] to indicate scalars, - // so adjust accordingly. TODO(b/111614235): Allow zero-sized buffers during - // toco conversion. 
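For context on the quantized MEAN/SUM path deleted above: PrepareSimple() and PrepareMeanOrSum() turn the real rescale ratio input_scale / output_scale into a 32-bit fixed-point multiplier plus a power-of-two shift via QuantizeMultiplier(). The sketch below is a simplified stand-in for that decomposition (DecomposeMultiplier is an illustrative helper of my own, not the TFLite routine):

#include <cmath>
#include <cstdint>
#include <cstdio>

// Splits a positive real multiplier into a Q31 fixed-point multiplier and a
// power-of-two shift, so that real ~= (quantized / 2^31) * 2^shift.
void DecomposeMultiplier(double real, int32_t* quantized, int* shift) {
  if (real == 0.0) { *quantized = 0; *shift = 0; return; }
  const double q = std::frexp(real, shift);   // real = q * 2^shift, q in [0.5, 1)
  int64_t q_fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
  if (q_fixed == (1LL << 31)) {               // rounding spilled into 2^31
    q_fixed /= 2;
    ++*shift;
  }
  *quantized = static_cast<int32_t>(q_fixed);
}

int main() {
  // e.g. input_scale = 0.05, output_scale = 0.1 -> real multiplier 0.5
  int32_t multiplier; int shift;
  DecomposeMultiplier(0.05 / 0.1, &multiplier, &shift);
  std::printf("multiplier=%d shift=%d\n", multiplier, shift);  // ~2^30, shift 0
}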
- output_shape->size = 0; - } - - int num_output_elements = 1; - int stretch_dim = -1; - for (int i = 0; i < output_shape->size; ++i) { - int value = output_shape->data[i]; - if (value == -1) { - TF_LITE_ENSURE_EQ(context, stretch_dim, -1); - stretch_dim = i; - } else { - num_output_elements *= value; - } - } - if (stretch_dim != -1) { - output_shape->data[stretch_dim] = num_input_elements / num_output_elements; - num_output_elements *= output_shape->data[stretch_dim]; - } - - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements); - return kTfLiteOk; -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - TF_LITE_ENSURE_EQ(context, ReshapeOutput(context, node), kTfLiteOk); - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - // TODO(b/162522304): storing input bytes in OpData increases some models - // significantly, possibly due to alignment issues. - size_t input_bytes; - TF_LITE_ENSURE_STATUS(TfLiteTypeSizeOf(input->type, &input_bytes)); - input_bytes *= ElementCount(*input->dims); - - // Do nothing for in-place reshape. - if (input->data.raw != output->data.raw) { - // Otherwise perform reshape with copy. - for (size_t i = 0; i < input_bytes; ++i) { - output->data.raw[i] = input->data.raw[i]; - } - } - return kTfLiteOk; -} - -} // namespace reshape - -TfLiteRegistration Register_RESHAPE() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/reshape::Prepare, - /*invoke=*/reshape::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc deleted file mode 100644 index 971de83c..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc +++ /dev/null @@ -1,121 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
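The deleted reshape kernel resolves at most one -1 ("stretch") entry in the requested shape from the remaining element count and then checks that input and output element counts match. A standalone sketch of that inference (ResolveShape is a hypothetical helper, assuming at most one -1 and no zero-sized dimensions):

#include <cstdio>
#include <vector>

// Returns false if the requested shape cannot hold exactly num_input_elements.
// A single -1 entry is inferred from the element count, as in ReshapeOutput().
bool ResolveShape(int num_input_elements, std::vector<int>& shape) {
  int stretch_dim = -1;
  int known_elements = 1;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] == -1) {
      if (stretch_dim != -1) return false;   // more than one -1 is invalid
      stretch_dim = static_cast<int>(i);
    } else {
      known_elements *= shape[i];
    }
  }
  if (stretch_dim != -1) {
    shape[stretch_dim] = num_input_elements / known_elements;
    known_elements *= shape[stretch_dim];
  }
  return known_elements == num_input_elements;
}

int main() {
  std::vector<int> shape = {2, -1, 4};       // 24 elements -> {2, 3, 4}
  if (ResolveShape(24, shape))
    std::printf("%d x %d x %d\n", shape[0], shape[1], shape[2]);
}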
-==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace resize_nearest_neighbor { - -constexpr int kInputTensor = 0; -constexpr int kSizeTensor = 1; -constexpr int kOutputTensor = 0; - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - const TfLiteTensor* size = GetInput(context, node, kSizeTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - - // Our current implementations rely on the input being 4D, - // and the size being 1D tensor with exactly 2 elements. - TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); - TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1); - TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32); - TF_LITE_ENSURE_EQ(context, size->dims->data[0], 2); - - output->type = input->type; - - if (!IsConstantTensor(size)) { - TF_LITE_KERNEL_LOG(context, "Dynamic tensors are unsupported in tfmicro."); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = - reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* size = - tflite::micro::GetEvalInput(context, node, kSizeTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - tflite::ResizeNearestNeighborParams op_params; - op_params.align_corners = params->align_corners; - op_params.half_pixel_centers = false; - - if (output->type == kTfLiteFloat32) { - reference_ops::ResizeNearestNeighbor( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(size), - tflite::micro::GetTensorData(size), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else if (output->type == kTfLiteUInt8) { - reference_ops::ResizeNearestNeighbor( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(size), - tflite::micro::GetTensorData(size), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else if (output->type == kTfLiteInt8) { - reference_ops::ResizeNearestNeighbor( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(size), - tflite::micro::GetTensorData(size), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - TF_LITE_KERNEL_LOG(context, - "Output type is %d, requires float, uint8_t or int8_t.", - output->type); - return kTfLiteError; - } - - return kTfLiteOk; -} -} // namespace resize_nearest_neighbor - -TfLiteRegistration Register_RESIZE_NEAREST_NEIGHBOR() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/resize_nearest_neighbor::Prepare, - /*invoke=*/resize_nearest_neighbor::Eval, - /*profiling_string=*/nullptr, 
- /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/round.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/round.cc deleted file mode 100644 index 5804016b..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/round.cc +++ /dev/null @@ -1,76 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/round.h" - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace round { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); - TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type); - TF_LITE_ENSURE_EQ(context, output->bytes, input->bytes); - TF_LITE_ENSURE_EQ(context, output->dims->size, input->dims->size); - for (int i = 0; i < output->dims->size; ++i) { - TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); - } - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - reference_ops::Round(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - - return kTfLiteOk; -} -} // namespace round - -TfLiteRegistration Register_ROUND() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/round::Prepare, - /*invoke=*/round::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/shape.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/shape.cc deleted file mode 100644 index df962f62..00000000 --- 
a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/shape.cc +++ /dev/null @@ -1,73 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/memory_helpers.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { - -namespace { -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -void ExtractShape(const TfLiteEvalTensor* input, int32_t* output_data) { - for (int i = 0; i < input->dims->size; ++i) { - output_data[i] = input->dims->data[i]; - } -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - if (output->type != kTfLiteInt32) { - TF_LITE_KERNEL_LOG(context, "Output type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); - return kTfLiteError; - } else { - ExtractShape(input, tflite::micro::GetTensorData(output)); - } - - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_SHAPE() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/softmax.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/softmax.cc deleted file mode 100644 index c96fa561..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/softmax.cc +++ /dev/null @@ -1,223 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
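The resize kernel deleted above delegates entirely to reference_ops::ResizeNearestNeighbor with half_pixel_centers hard-coded to false. The essential index arithmetic is roughly the following (a minimal sketch under that assumption; NearestSourceIndex is my own helper, not the reference implementation):

#include <algorithm>
#include <cmath>
#include <cstdio>

// Maps an output pixel index back to an input pixel index for nearest-neighbor
// resizing, approximating the reference behaviour with half_pixel_centers == false.
int NearestSourceIndex(int out_index, int in_size, int out_size,
                       bool align_corners) {
  const float scale = (align_corners && out_size > 1)
                          ? (in_size - 1) / static_cast<float>(out_size - 1)
                          : in_size / static_cast<float>(out_size);
  const float mapped = out_index * scale;
  const int in_index = align_corners ? static_cast<int>(std::round(mapped))
                                     : static_cast<int>(std::floor(mapped));
  return std::min(in_index, in_size - 1);
}

int main() {
  // Upscale a width of 4 to 8: output x = 0..7 maps to input x = 0,0,1,1,2,2,3,3.
  for (int x = 0; x < 8; ++x) std::printf("%d ", NearestSourceIndex(x, 4, 8, false));
  std::printf("\n");
}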
-==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/softmax.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -// Softmax parameter data that persists in user_data -static constexpr int kInt16LUTArraySize = 513; - -TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context, - const TfLiteTensor* input, - TfLiteTensor* output, - const TfLiteSoftmaxParams* params, - SoftmaxParams* op_data) { - if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8 || - input->type == kTfLiteInt16) { - if (input->type == kTfLiteUInt8) { - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteUInt8); - TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); - } else if (input->type == kTfLiteInt16) { - TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); - TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 32768, - (0.001f * 1.f / 32768)); - } else { // input->type == kTfLiteInt8 - TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8); - if (output->type == kTfLiteInt16) { - TF_LITE_ENSURE_EQ(context, output->params.zero_point, -32768); - TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 65536, - (0.001f * 1.f / 65536)); - } else { // output->type == kTfLiteint8 - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8); - TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128); - TF_LITE_ENSURE(context, output->params.scale == 1.f / 256); - } - } - - static const int kScaledDiffIntegerBits = 5; - - // Calculate input_multiplier and input_left_shift - if (input->type == kTfLiteInt16) { - int input_left_shift; - double input_scale_beta_rescale = - static_cast(input->params.scale) * - static_cast(params->beta) / - (10.0 / 65535.0); // scale the input_diff such that [-65535, 0] - // correspond to [-10.0, 0.0] - QuantizeMultiplier(input_scale_beta_rescale, &op_data->input_multiplier, - &input_left_shift); - op_data->input_left_shift = input_left_shift; - } else { - int input_left_shift; - tflite::PreprocessSoftmaxScaling( - static_cast(params->beta), - static_cast(input->params.scale), kScaledDiffIntegerBits, - &op_data->input_multiplier, &input_left_shift); - op_data->input_left_shift = input_left_shift; - op_data->diff_min = - -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits, - op_data->input_left_shift); - } - } else { - TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); - op_data->beta = static_cast(params->beta); - } - return kTfLiteOk; -} - -// Takes a tensor and performs softmax along the last dimension. 
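For the float path of this softmax kernel, the node simply forwards beta to reference_ops::Softmax; numerically that is the usual max-subtracted softmax scaled by beta. A self-contained illustration (not the library code):

#include <cmath>
#include <cstdio>

// Numerically stable softmax over one row, scaled by beta.
void SoftmaxRow(const float* input, float* output, int depth, float beta) {
  float max_val = input[0];
  for (int i = 1; i < depth; ++i) max_val = std::fmax(max_val, input[i]);

  float sum = 0.f;
  for (int i = 0; i < depth; ++i) {
    output[i] = std::exp(beta * (input[i] - max_val));  // exp of shifted logits
    sum += output[i];
  }
  for (int i = 0; i < depth; ++i) output[i] /= sum;     // normalize to 1
}

int main() {
  const float logits[3] = {1.f, 2.f, 3.f};
  float probs[3];
  SoftmaxRow(logits, probs, 3, /*beta=*/1.f);
  std::printf("%.3f %.3f %.3f\n", probs[0], probs[1], probs[2]);  // ~0.090 0.245 0.665
}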
-void SoftmaxFloat(const TfLiteEvalTensor* input, TfLiteEvalTensor* output, - const SoftmaxParams& op_data) { - tflite::reference_ops::Softmax(op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void SoftmaxQuantized(const TfLiteEvalTensor* input, TfLiteEvalTensor* output, - const SoftmaxParams& op_data) { - if (input->type == kTfLiteUInt8) { - tflite::reference_ops::Softmax( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else if (input->type == kTfLiteInt8) { - if (output->type == kTfLiteInt16) { - tflite::reference_ops::Softmax( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - tflite::reference_ops::Softmax( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } else { - tflite::reference_ops::SoftmaxInt16( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} - -void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(SoftmaxParams)); -} - -TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, 0); - TF_LITE_ENSURE(context, input != nullptr); - TF_LITE_ENSURE(context, NumDimensions(input) >= 1); - TfLiteTensor* output = GetOutput(context, node, 0); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE(context, node->user_data != nullptr); - SoftmaxParams* op_data = static_cast(node->user_data); - // Only allocate LUTs for KTfLiteInt16 data type - if (input->type == kTfLiteInt16) { - void* raw_exp_lut = context->AllocatePersistentBuffer( - context, sizeof(int16_t) * kInt16LUTArraySize); - TF_LITE_ENSURE(context, raw_exp_lut != nullptr); - op_data->exp_lut = reinterpret_cast(raw_exp_lut); - void* one_over_one_plus_x_lut = context->AllocatePersistentBuffer( - context, sizeof(int16_t) * kInt16LUTArraySize); - TF_LITE_ENSURE(context, one_over_one_plus_x_lut != nullptr); - op_data->one_over_one_plus_x_lut = - reinterpret_cast(one_over_one_plus_x_lut); - } - - if (output->type == kTfLiteInt16) { - TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || - input->type == kTfLiteUInt8 || - input->type == kTfLiteInt16); - } else { - TF_LITE_ENSURE_EQ(context, input->type, output->type); - } - - // Populate LUT if required - if (input->type == kTfLiteInt16) { - TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); - // exp LUT only used on negative values - // we consider exp(-10.0) is insignificant to accumulation - gen_lut([](float value) { return std::exp(value); }, -10.0f, 0.0f, - op_data->exp_lut, kInt16LUTArraySize); - gen_lut([](float value) { return 1.0f / (1.0f + value); }, 0.0f, 1.0f, - op_data->one_over_one_plus_x_lut, kInt16LUTArraySize); - op_data->zero_point = output->params.zero_point; - op_data->scale = output->params.scale; - } - - auto* 
params = static_cast(node->builtin_data); - return CalculateSoftmaxParams(context, input, output, params, op_data); -} - -TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - - TFLITE_DCHECK(node->user_data != nullptr); - SoftmaxParams op_data = *static_cast(node->user_data); - - switch (input->type) { - case kTfLiteFloat32: { - SoftmaxFloat(input, output, op_data); - return kTfLiteOk; - } - case kTfLiteInt8: - case kTfLiteUInt8: - case kTfLiteInt16: { - SoftmaxQuantized(input, output, op_data); - return kTfLiteOk; - } - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } -} -} // namespace - -TfLiteRegistration Register_SOFTMAX() { - return {/*init=*/SoftmaxInit, - /*free=*/nullptr, - /*prepare=*/SoftmaxPrepare, - /*invoke=*/SoftmaxEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/split.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/split.cc deleted file mode 100644 index a1236d71..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/split.cc +++ /dev/null @@ -1,135 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace split { - -template -TfLiteStatus SplitImpl(TfLiteContext* context, TfLiteNode* node, - const TfLiteEvalTensor* input, int axis_value) { - const int output_count = NumOutputs(node); - const TfLiteIntArray* input_dims = input->dims; - const TfLiteEvalTensor* output0 = - tflite::micro::GetEvalOutput(context, node, 0); - const TfLiteIntArray* output_dims = output0->dims; - - const int split_dimensions = input_dims->size; - int axis = axis_value < 0 ? 
axis_value + split_dimensions : axis_value; - - TFLITE_DCHECK_LT(axis, split_dimensions); - TFLITE_DCHECK_EQ(output_dims->size, split_dimensions); - - int64_t split_size = output_dims->data[axis] * output_count; - - TFLITE_DCHECK_EQ(split_size, input_dims->data[axis]); - int64_t outer_size = 1; - for (int i = 0; i < axis; ++i) { - outer_size *= input_dims->data[i]; - } - - int64_t base_inner_size = 1; - for (int i = axis + 1; i < split_dimensions; ++i) { - base_inner_size *= input_dims->data[i]; - } - - const T* input_ptr = tflite::micro::GetTensorData(input); - for (int k = 0; k < outer_size; ++k) { - for (int i = 0; i < output_count; ++i) { - TfLiteEvalTensor* t = tflite::micro::GetEvalOutput(context, node, i); - T* output_data = tflite::micro::GetTensorData(t); - const int copy_size = output_dims->data[axis] * base_inner_size; - T* output_ptr = output_data + k * copy_size; - for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j]; - input_ptr += copy_size; - } - } - - return kTfLiteOk; -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* axis = GetInput(context, node, 0); - TF_LITE_ENSURE(context, axis != nullptr); - - // Dynamic output tensors are needed if axis tensor is not constant. - // But Micro doesn't support dynamic memory allocation, so we only support - // constant axis tensor for now. - TF_LITE_ENSURE_MSG(context, IsConstantTensor(axis), - "Non constant axis tensor not supported"); - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 1); - - int axis_value = tflite::micro::GetTensorData(axis)[0]; - if (axis_value < 0) { - axis_value += input->dims->size; - } - - TF_LITE_ENSURE(context, axis_value >= 0); - TF_LITE_ENSURE(context, axis_value < input->dims->size); - - switch (input->type) { - case kTfLiteFloat32: { - return SplitImpl(context, node, input, axis_value); - } - case kTfLiteUInt8: { - return SplitImpl(context, node, input, axis_value); - } - case kTfLiteInt8: { - return SplitImpl(context, node, input, axis_value); - } - case kTfLiteInt16: { - return SplitImpl(context, node, input, axis_value); - } - case kTfLiteInt32: { - return SplitImpl(context, node, input, axis_value); - } - default: - TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } -#undef TF_LITE_SPLIT - - return kTfLiteOk; -} - -} // namespace split - -TfLiteRegistration Register_SPLIT() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/split::Prepare, - /*invoke=*/split::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/split_v.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/split_v.cc deleted file mode 100644 index 600523ab..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/split_v.cc +++ /dev/null @@ -1,135 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace split_v { - -template -TfLiteStatus SplitImpl(TfLiteContext* context, TfLiteNode* node, - const TfLiteEvalTensor* input, int axis_value) { - const TfLiteIntArray* input_dims = input->dims; - const TfLiteEvalTensor* output0 = - tflite::micro::GetEvalOutput(context, node, 0); - - const int split_dimensions = input_dims->size; - - TFLITE_DCHECK_LT(axis_value, split_dimensions); - TFLITE_DCHECK_EQ(output0->dims->size, split_dimensions); - - int64_t split_size = 0; - const int output_count = NumOutputs(node); - for (int i = 0; i < output_count; i++) { - split_size += - tflite::micro::GetEvalOutput(context, node, i)->dims->data[axis_value]; - } - TFLITE_DCHECK_EQ(split_size, input_dims->data[axis_value]); - int64_t outer_size = 1; - for (int i = 0; i < axis_value; ++i) { - outer_size *= input_dims->data[i]; - } - - int64_t base_inner_size = 1; - for (int i = axis_value + 1; i < split_dimensions; ++i) { - base_inner_size *= input_dims->data[i]; - } - - const T* input_ptr = tflite::micro::GetTensorData(input); - for (int k = 0; k < outer_size; ++k) { - for (int i = 0; i < output_count; ++i) { - TfLiteEvalTensor* output_tensor = - tflite::micro::GetEvalOutput(context, node, i); - T* output_data = tflite::micro::GetTensorData(output_tensor); - const int copy_size = - output_tensor->dims->data[axis_value] * base_inner_size; - T* output_ptr = output_data + k * copy_size; - for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j]; - input_ptr += copy_size; - } - } - - return kTfLiteOk; -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); - - // Dynamic output tensors are needed if axis tensor is not constant. - // But Micro doesn't support dynamic memory allocation, so we only support - // constant axis tensor for now. 
- const TfLiteTensor* axis = GetInput(context, node, 2); - TF_LITE_ENSURE_MSG(context, IsConstantTensor(axis), - "Non constant axis tensor not supported"); - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 2); - - int axis_value = tflite::micro::GetTensorData(axis)[0]; - if (axis_value < 0) { - axis_value += input->dims->size; - } - - TF_LITE_ENSURE(context, axis_value >= 0); - TF_LITE_ENSURE(context, axis_value < input->dims->size); - - switch (input->type) { - case kTfLiteFloat32: { - return SplitImpl(context, node, input, axis_value); - } - case kTfLiteInt8: { - return SplitImpl(context, node, input, axis_value); - } - case kTfLiteInt16: { - return SplitImpl(context, node, input, axis_value); - } - case kTfLiteInt32: { - return SplitImpl(context, node, input, axis_value); - } - default: - TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace split_v - -TfLiteRegistration Register_SPLIT_V() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/split_v::Prepare, - /*invoke=*/split_v::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/strided_slice.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/strided_slice.cc deleted file mode 100644 index 2dbe6e1d..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/strided_slice.cc +++ /dev/null @@ -1,192 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
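Both SPLIT kernels deleted above flatten the copy into outer_size x copy_size runs around the split axis. A compact standalone version of that loop for equal-sized outputs, mirroring the outer_size / base_inner_size decomposition in SplitImpl (SplitEqual is a hypothetical helper):

#include <cstdio>
#include <vector>

// Splits `input` (with dimensions `dims`) into num_outputs equal slices along `axis`.
template <typename T>
void SplitEqual(const std::vector<T>& input, const std::vector<int>& dims,
                int axis, int num_outputs, std::vector<std::vector<T>>& outputs) {
  int outer_size = 1;
  for (int i = 0; i < axis; ++i) outer_size *= dims[i];
  int inner_size = 1;
  for (int i = axis + 1; i < static_cast<int>(dims.size()); ++i) inner_size *= dims[i];

  const int copy_size = (dims[axis] / num_outputs) * inner_size;
  outputs.assign(num_outputs, std::vector<T>());
  const T* in = input.data();
  for (int k = 0; k < outer_size; ++k) {
    for (int i = 0; i < num_outputs; ++i) {
      outputs[i].insert(outputs[i].end(), in, in + copy_size);  // contiguous run
      in += copy_size;
    }
  }
}

int main() {
  std::vector<int> data = {0, 1, 2, 3, 4, 5};          // shape {2, 3}
  std::vector<std::vector<int>> parts;
  SplitEqual(data, {2, 3}, /*axis=*/1, /*num_outputs=*/3, parts);
  // parts[0] = {0, 3}, parts[1] = {1, 4}, parts[2] = {2, 5}
  std::printf("%d %d\n", parts[0][0], parts[0][1]);
}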
-==============================================================================*/ -#include "tensorflow/lite/kernels/internal/reference/strided_slice.h" - -#include -#include - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace strided_slice { - -constexpr int kInputTensor = 0; -constexpr int kBeginTensor = 1; -constexpr int kEndTensor = 2; -constexpr int kStridesTensor = 3; -constexpr int kOutputTensor = 0; - -struct StridedSliceContext { - StridedSliceContext(TfLiteContext* context, TfLiteNode* node) { - params = reinterpret_cast(node->builtin_data); - input = GetInput(context, node, kInputTensor); - begin = GetInput(context, node, kBeginTensor); - end = GetInput(context, node, kEndTensor); - strides = GetInput(context, node, kStridesTensor); - output = GetOutput(context, node, kOutputTensor); - dims = NumDimensions(input); - } - const TfLiteStridedSliceParams* params; - const TfLiteTensor* input; - const TfLiteTensor* begin; - const TfLiteTensor* end; - const TfLiteTensor* strides; - TfLiteTensor* output; - int dims; -}; - -// This Op only supports 1-4D cases and since we use the reference 4D -// implementation, the 1-3D tensors are mapped to 4D. -const int kMaxDim = 4; - -tflite::StridedSliceParams BuildStridedSliceParams( - StridedSliceContext* op_context) { - tflite::StridedSliceParams op_params; - op_params.start_indices_count = op_context->dims; - op_params.stop_indices_count = op_context->dims; - op_params.strides_count = op_context->dims; - - for (int i = 0; i < op_context->dims; ++i) { - op_params.start_indices[i] = GetTensorData(op_context->begin)[i]; - op_params.stop_indices[i] = GetTensorData(op_context->end)[i]; - op_params.strides[i] = GetTensorData(op_context->strides)[i]; - } - - op_params.begin_mask = op_context->params->begin_mask; - op_params.ellipsis_mask = 0; - op_params.end_mask = op_context->params->end_mask; - op_params.new_axis_mask = 0; - op_params.shrink_axis_mask = op_context->params->shrink_axis_mask; - return op_params; -} - -// Processes the indexing tensors (begin, end and strides) to resize the -// output tensor. This function is callable from both Prepare() and Eval() as -// long as the caller ensures the indexing tensors are present. -TfLiteStatus CheckOutputSize(TfLiteContext* context, - StridedSliceContext* op_context) { - using ::tflite::strided_slice::StartForAxis; - using ::tflite::strided_slice::StopForAxis; - TfLiteIntArray* output_shape = op_context->output->dims; - int shape_size = 0; - auto op_params = BuildStridedSliceParams(op_context); - auto input_shape = GetTensorShape(op_context->input); - for (int idx = 0; idx < op_context->dims; ++idx) { - int32_t stride = GetTensorData(op_context->strides)[idx]; - TF_LITE_ENSURE_MSG(context, stride != 0, "stride value has to be non-zero"); - int32_t begin = StartForAxis(op_params, input_shape, idx); - int32_t end = StopForAxis(op_params, input_shape, idx, begin); - - // When shrinking an axis, the end position does not matter (and can be - // incorrect when negative indexing is used, see Issue #19260). Always use - // begin + 1 to generate a length 1 slice, since begin has - // already been adjusted for negative indices by StartForAxis. 
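The CheckOutputSize() logic in this strided-slice kernel derives each output extent as ceil((end - begin) / stride), clamped at zero, with shrink_axis forcing a length-1 slice. For example, begin=1, end=7, stride=2 yields ceil(6 / 2) = 3 elements (indices 1, 3, 5). A minimal sketch of just that per-axis computation (SliceExtent is an illustrative helper):

#include <cmath>
#include <cstdio>

// Output extent of one strided-slice axis, valid for positive and negative
// strides, clamped at 0 as in CheckOutputSize().
int SliceExtent(int begin, int end, int stride) {
  const int extent =
      static_cast<int>(std::ceil((end - begin) / static_cast<float>(stride)));
  return extent < 0 ? 0 : extent;
}

int main() {
  std::printf("%d\n", SliceExtent(1, 7, 2));    // 3 -> indices 1, 3, 5
  std::printf("%d\n", SliceExtent(5, 0, -2));   // 3 -> indices 5, 3, 1
}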
- const bool shrink_axis = op_context->params->shrink_axis_mask & (1 << idx); - if (shrink_axis) { - end = begin + 1; - } - - // This is valid for both positive and negative strides - int32_t dim_shape = std::ceil((end - begin) / static_cast(stride)); - dim_shape = dim_shape < 0 ? 0 : dim_shape; - if (!shrink_axis) { - TF_LITE_ENSURE_EQ(context, output_shape->data[shape_size], dim_shape); - shape_size++; - } - } - TF_LITE_ENSURE_EQ(context, output_shape->size, shape_size); - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(StridedSliceParams)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - StridedSliceParams* op_params = - static_cast(node->user_data); - TF_LITE_ENSURE_EQ(context, NumInputs(node), 4); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - StridedSliceContext op_context(context, node); - TF_LITE_ENSURE_MSG(context, op_context.dims <= kMaxDim, - "input dim should not exceed 4"); - auto params = BuildStridedSliceParams(&op_context); - memcpy(op_params, ¶ms, sizeof(StridedSliceParams)); - return CheckOutputSize(context, &op_context); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const StridedSliceParams& op_params = - *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - switch (output->type) { - case kTfLiteFloat32: - reference_ops::StridedSlice(op_params, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - case kTfLiteUInt8: - reference_ops::StridedSlice( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - case kTfLiteInt8: - reference_ops::StridedSlice(op_params, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} -} // namespace strided_slice - -TfLiteRegistration Register_STRIDED_SLICE() { - return {/*init=*/strided_slice::Init, - /*free=*/nullptr, - /*prepare=*/strided_slice::Prepare, - /*invoke=*/strided_slice::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/sub.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/sub.cc deleted file mode 100644 index 2cc61a9b..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/sub.cc +++ /dev/null @@ -1,256 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/sub.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/internal/types.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace sub { - -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; - -struct OpData { - bool requires_broadcast; - - // These fields are used in both the general 8-bit -> 8bit quantized path, - // and the special 16-bit -> 16bit quantized path - int input1_shift; - int input2_shift; - int32_t output_activation_min; - int32_t output_activation_max; - - // These fields are used only in the general 8-bit -> 8bit quantized path - int32_t input1_multiplier; - int32_t input2_multiplier; - int32_t output_multiplier; - int output_shift; - int left_shift; - int32_t input1_offset; - int32_t input2_offset; - int32_t output_offset; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteSubParams* params, - const TfLiteTensor* input1, - const TfLiteTensor* input2, TfLiteTensor* output, - OpData* data) { - data->requires_broadcast = !HaveSameShapes(input1, input2); - - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - // 8bit -> 8bit general quantized path, with general rescalings - data->input1_offset = -input1->params.zero_point; - data->input2_offset = -input2->params.zero_point; - data->output_offset = output->params.zero_point; - data->left_shift = 20; - const float twice_max_input_scale = - 2 * std::max(input1->params.scale, input2->params.scale); - const double real_input1_multiplier = - static_cast(input1->params.scale / twice_max_input_scale); - const double real_input2_multiplier = - static_cast(input2->params.scale / twice_max_input_scale); - const double real_output_multiplier = - static_cast(twice_max_input_scale / - ((1 << data->left_shift) * output->params.scale)); - - QuantizeMultiplierSmallerThanOneExp( - real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_output_multiplier, &data->output_multiplier, &data->output_shift); - - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->output_activation_min, - &data->output_activation_max)); - } - - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return 
context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); - TF_LITE_ENSURE(context, input1 != nullptr); - const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); - TF_LITE_ENSURE(context, input2 != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_STATUS( - CalculateOpData(context, params, input1, input2, output, data)); - return kTfLiteOk; -} - -void EvalSub(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params, - const OpData* data, const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { - float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, - &output_activation_max); - tflite::ArithmeticParams op_params; - SetActivationParams(output_activation_min, output_activation_max, &op_params); - if (data->requires_broadcast) { - tflite::reference_ops::BroadcastSubSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - tflite::reference_ops::SubWithActivation( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} - -TfLiteStatus EvalSubQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteSubParams* params, const OpData* data, - const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, - TfLiteEvalTensor* output) { - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - tflite::ArithmeticParams op_params; - op_params.left_shift = data->left_shift; - op_params.input1_offset = data->input1_offset; - op_params.input1_multiplier = data->input1_multiplier; - op_params.input1_shift = data->input1_shift; - op_params.input2_offset = data->input2_offset; - op_params.input2_multiplier = data->input2_multiplier; - op_params.input2_shift = data->input2_shift; - op_params.output_offset = data->output_offset; - op_params.output_multiplier = data->output_multiplier; - op_params.output_shift = data->output_shift; - SetActivationParams(data->output_activation_min, - data->output_activation_max, &op_params); - bool need_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); - - if (output->type == kTfLiteInt8) { - if (need_broadcast) { - tflite::reference_ops::BroadcastSubSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - tflite::reference_ops::Sub( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - 
tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } else { - if (need_broadcast) { - tflite::reference_ops::BroadcastSubSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - tflite::reference_ops::Sub( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } - } - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - if (output->type == kTfLiteFloat32) { - EvalSub(context, node, params, &data, input1, input2, output); - } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - TF_LITE_ENSURE_OK(context, EvalSubQuantized(context, node, params, &data, - input1, input2, output)); - } else { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); - return kTfLiteError; - } - - return kTfLiteOk; -} - -} // namespace sub - -TfLiteRegistration Register_SUB() { - return {/*init=*/sub::Init, - /*free=*/nullptr, - /*prepare=*/sub::Prepare, - /*invoke=*/sub::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/svdf.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/svdf.cc deleted file mode 100644 index 9ea43d43..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/svdf.cc +++ /dev/null @@ -1,391 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
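The quantized SUB path above folds both input scales and the output scale into three fixed-point multipliers around a fixed left_shift of 20. In real (floating-point) arithmetic, the quantities prepared by CalculateOpData() look roughly like the sketch below; SubRescale and ComputeSubRescale are illustrative names only, and the real kernel additionally quantizes each multiplier with QuantizeMultiplierSmallerThanOneExp:

#include <algorithm>
#include <cstdio>

// Real-valued view of the SUB rescaling: both inputs are expressed relative to
// twice the larger input scale, and the result is mapped back to the output
// scale after the fixed left shift.
struct SubRescale {
  double input1_multiplier;
  double input2_multiplier;
  double output_multiplier;
  int left_shift = 20;
};

SubRescale ComputeSubRescale(double input1_scale, double input2_scale,
                             double output_scale) {
  SubRescale r;
  const double twice_max = 2.0 * std::max(input1_scale, input2_scale);
  r.input1_multiplier = input1_scale / twice_max;
  r.input2_multiplier = input2_scale / twice_max;
  r.output_multiplier = twice_max / ((1 << r.left_shift) * output_scale);
  return r;
}

int main() {
  const SubRescale r = ComputeSubRescale(0.02, 0.05, 0.1);
  std::printf("%f %f %g\n", r.input1_multiplier, r.input2_multiplier,
              r.output_multiplier);
}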
-==============================================================================*/ - -#include "tensorflow/lite/micro/kernels/svdf.h" - -#include - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/activation_utils.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace { - -// Input tensors. -constexpr int kInputTensor = 0; -constexpr int kWeightsFeatureTensor = 1; -constexpr int kWeightsTimeTensor = 2; -constexpr int kBiasTensor = 3; -// This is a variable tensor, and will be modified by this op. -constexpr int kInputActivationStateTensor = 4; - -// Output tensor. -constexpr int kOutputTensor = 0; - -/** - * This version of SVDF is specific to TFLite Micro. It contains the following - * differences between the TFLite version: - * - * 1.) Scratch tensor allocation - scratch tensors must be known ahead of time - * for the Micro interpreter. - * 2.) Output dimensions - the TFLite version determines output size and runtime - * and resizes the output tensor. Micro runtime does not support tensor - * resizing. - */ -static inline void ApplyTimeWeightsBiasAndActivation( - int batch_size, int memory_size, int num_filters, int num_units, int rank, - const float* const __restrict__ weights_time_ptr, - const float* const __restrict__ bias_ptr, TfLiteFusedActivation activation, - float* const __restrict__ state_ptr, float* const __restrict__ scratch_ptr, - float* const __restrict__ output_ptr) { - // Compute matmul(activation_state, weights_time). - for (int b = 0; b < batch_size; ++b) { - // Perform batched vector dot product: - float* scratch_ptr_batch = scratch_ptr + b * num_filters; - const float* vector1_ptr = weights_time_ptr; - const float* vector2_ptr = state_ptr + b * memory_size * num_filters; - for (int i = 0; i < num_filters; ++i) { - *scratch_ptr_batch = 0.f; - for (int j = 0; j < memory_size; ++j) { - *scratch_ptr_batch += *vector1_ptr++ * *vector2_ptr++; - } - scratch_ptr_batch++; - } - } - - // Initialize output with bias if provided. - if (bias_ptr) { - // VectorBatchVectorAssign - for (int i = 0; i < batch_size; ++i) { - float* output_data = output_ptr + i * num_units; - const float* bias_data = bias_ptr; - for (int j = 0; j < num_units; ++j) { - *output_data++ = *bias_data++; - } - } - } else { - float* output_data = output_ptr; - for (int i = 0; i < batch_size * num_units; ++i) { - *output_data++ = 0.0f; - } - } - - // Reduction sum. - for (int b = 0; b < batch_size; ++b) { - float* output_ptr_batch = output_ptr + b * num_units; - float* scratch_ptr_batch = scratch_ptr + b * num_filters; - - // Reduction sum vector - for (int i = 0; i < num_units; ++i) { - for (int j = 0; j < rank; j++) { - output_ptr_batch[i] += *scratch_ptr_batch++; - } - } - } - - // Apply activation. 
- for (int b = 0; b < batch_size; ++b) { - float* output_ptr_batch = output_ptr + b * num_units; - for (int i = 0; i < num_units; ++i) { - *output_ptr_batch = - tflite::ops::micro::ActivationValFloat(activation, *output_ptr_batch); - ++output_ptr_batch; - } - } -} - -inline void EvalFloatSVDF( - TfLiteContext* context, TfLiteNode* node, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* weights_feature, - const TfLiteEvalTensor* weights_time, const TfLiteEvalTensor* bias, - const TfLiteSVDFParams* params, int scratch_tensor_index, - TfLiteEvalTensor* activation_state, TfLiteEvalTensor* output) { - const int rank = params->rank; - const int batch_size = input->dims->data[0]; - const int input_size = input->dims->data[1]; - const int num_filters = weights_feature->dims->data[0]; - const int num_units = num_filters / rank; - const int memory_size = weights_time->dims->data[1]; - - const float* weights_feature_ptr = - tflite::micro::GetTensorData(weights_feature); - const float* weights_time_ptr = - tflite::micro::GetTensorData(weights_time); - const float* bias_ptr = tflite::micro::GetTensorData(bias); - const float* input_ptr = tflite::micro::GetTensorData(input); - - float* state_ptr = tflite::micro::GetTensorData(activation_state); - - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(context->GetScratchBuffer != nullptr); - - float* scratch_ptr = static_cast( - context->GetScratchBuffer(context, scratch_tensor_index)); - - float* output_ptr = tflite::micro::GetTensorData(output); - - // Left shift the activation_state. - { - float* new_state_start = state_ptr; - const float* old_state_start = state_ptr + 1; - const float* old_state_end = - state_ptr + batch_size * num_filters * memory_size; - while (old_state_start != old_state_end) { - *new_state_start++ = *old_state_start++; - } - } - - // Note: no need to clear the latest activation, matmul is not accumulative. - - // Compute conv1d(inputs, weights_feature). - // The activation_state's rightmost column is used to save current cycle - // activation. This is achieved by starting at state_ptr[memory_size - 1] and - // having the stride equal to memory_size. 
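Aside (annotation, not part of the deleted svdf.cc above): the comment describes the activation-state layout, [batch, num_filters, memory_size] flattened, where each filter keeps a sliding window of memory_size past values and the current cycle's feature-matmul result lands in the window's last column, i.e. every memory_size-th element starting at index memory_size - 1. A toy standalone sketch of that strided write, with made-up sizes and values:

#include <cstdio>
#include <vector>

int main() {
  const int batch = 1, num_filters = 2, memory_size = 4;
  std::vector<float> state(batch * num_filters * memory_size, 0.f);

  // Pretend these are this cycle's feature-matmul results, one per filter.
  const float new_activation[2] = {1.5f, -2.0f};

  // index = (b * num_filters + f) * memory_size + (memory_size - 1):
  // the rightmost column of each filter's window, stride memory_size apart.
  for (int b = 0; b < batch; ++b) {
    for (int f = 0; f < num_filters; ++f) {
      state[(b * num_filters + f) * memory_size + (memory_size - 1)] =
          new_activation[f];
    }
  }

  for (float v : state) printf("%.1f ", v);
  printf("\n");  // 0.0 0.0 0.0 1.5 0.0 0.0 0.0 -2.0
  return 0;
}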
- - // Perform batched matrix vector multiply operation: - { - const float* matrix = weights_feature_ptr; - const float* vector = input_ptr; - float* result = &state_ptr[memory_size - 1]; - float* result_in_batch = result; - for (int i = 0; i < batch_size; ++i) { - const float* matrix_ptr = matrix; - for (int j = 0; j < num_filters; ++j) { - float dot_prod = 0.0f; - const float* vector_in_batch = vector + i * input_size; - for (int k = 0; k < input_size; ++k) { - dot_prod += *matrix_ptr++ * *vector_in_batch++; - } - *result_in_batch = dot_prod; - result_in_batch += memory_size; - } - } - } - - ApplyTimeWeightsBiasAndActivation( - batch_size, memory_size, num_filters, num_units, rank, weights_time_ptr, - bias_ptr, params->activation, state_ptr, scratch_ptr, output_ptr); -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - - const auto* params = static_cast(node->builtin_data); - - // Validate Tensor Inputs (dtype depends on quantization): - // [0] = Input, {2, batch_size, input_size} - // [1] = Weights Feature, {2, num_filters, input_size} - // [2] = Weights Time, {2, num_filters, memory_size} - // [3] = Bias (optional), {1, num_units} - // [4] = Activation State (variable), - // {2, batch_size, memory_size * num_filters} - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* weights_feature = - GetInput(context, node, kWeightsFeatureTensor); - TF_LITE_ENSURE(context, weights_feature != nullptr); - const TfLiteTensor* weights_time = - GetInput(context, node, kWeightsTimeTensor); - TF_LITE_ENSURE(context, weights_time != nullptr); - const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); - const TfLiteTensor* activation_state = - GetInput(context, node, kInputActivationStateTensor); - TF_LITE_ENSURE(context, activation_state != nullptr); - - // Define input constants based on input tensor definition above: - const int rank = params->rank; - const int input_size = input->dims->data[1]; - const int batch_size = input->dims->data[0]; - const int num_filters = weights_feature->dims->data[0]; - TF_LITE_ENSURE_EQ(context, num_filters % rank, 0); - const int num_units = num_filters / rank; - const int memory_size = weights_time->dims->data[1]; - - // Validate Input Tensor: - TF_LITE_ENSURE(context, - input->type == kTfLiteFloat32 || input->type == kTfLiteInt8); - TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); - - // Validate Tensor Output: - // [0] = float/int8_t, {2, batch_size, num_units} - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2); - TF_LITE_ENSURE_EQ(context, output->dims->data[0], batch_size); - TF_LITE_ENSURE_EQ(context, output->dims->data[1], num_units); - - // Validate Weights Feature Input Tensor: - TF_LITE_ENSURE_EQ(context, NumDimensions(weights_feature), 2); - TF_LITE_ENSURE_EQ(context, weights_feature->dims->data[1], input_size); - - // Validate Weights Time Input Tensor: - TF_LITE_ENSURE_EQ(context, NumDimensions(weights_time), 2); - TF_LITE_ENSURE_EQ(context, weights_time->dims->data[0], num_filters); - TF_LITE_ENSURE_EQ(context, 
weights_time->dims->data[1], memory_size); - - // Validate Optional Bias Input Tensor: - if (bias != nullptr) { - TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units); - } - - // Validate Activation State Input Tensor: - TF_LITE_ENSURE_EQ(context, NumDimensions(activation_state), 2); - TF_LITE_ENSURE_EQ(context, activation_state->dims->data[0], batch_size); - TF_LITE_ENSURE_EQ(context, activation_state->dims->data[1], - memory_size * num_filters); - // Since is_variable is not part of TFLiteEvalTensor, check is_variable here. - TF_LITE_ENSURE_EQ(context, activation_state->is_variable, true); - - TF_LITE_ENSURE_EQ(context, node->inputs->size, 5); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteInt8); - TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteInt16); - TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteInt16); - if (bias != nullptr) { - TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteInt32); - } - - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8); - - const double effective_scale_1 = static_cast( - input->params.scale * weights_feature->params.scale / - activation_state->params.scale); - const double effective_scale_2 = - static_cast(activation_state->params.scale * - weights_time->params.scale / output->params.scale); - - // TODO(b/162018098): Use TF_LITE_ENSURE_NEAR when it is ready. - TF_LITE_ENSURE( - context, - std::abs(static_cast(bias->params.scale) - - static_cast(activation_state->params.scale * - weights_time->params.scale)) < 1e-5); - - QuantizeMultiplier(effective_scale_1, &(data->effective_scale_1_a), - &(data->effective_scale_1_b)); - QuantizeMultiplier(effective_scale_2, &(data->effective_scale_2_a), - &(data->effective_scale_2_b)); - - data->input_zero_point = input->params.zero_point; - data->output_zero_point = output->params.zero_point; - - TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); - - const TfLiteStatus scratch_status = context->RequestScratchBufferInArena( - context, batch_size * num_filters * sizeof(int32_t), - &(data->scratch_tensor_index)); - TF_LITE_ENSURE_OK(context, scratch_status); - - const TfLiteStatus scratch_output_status = - context->RequestScratchBufferInArena( - context, batch_size * num_units * sizeof(int32_t), - &(data->scratch_output_tensor_index)); - TF_LITE_ENSURE_OK(context, scratch_output_status); - } else { - TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteFloat32); - TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteFloat32); - TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteFloat32); - if (bias != nullptr) { - TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteFloat32); - } - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); - - TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); - const TfLiteStatus scratch_status = context->RequestScratchBufferInArena( - context, batch_size * num_filters * sizeof(float), - &(data->scratch_tensor_index)); - TF_LITE_ENSURE_OK(context, scratch_status); - } - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* weights_feature = - tflite::micro::GetEvalInput(context, 
node, kWeightsFeatureTensor); - const TfLiteEvalTensor* weights_time = - tflite::micro::GetEvalInput(context, node, kWeightsTimeTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 5) - ? tflite::micro::GetEvalInput(context, node, kBiasTensor) - : nullptr; - TfLiteEvalTensor* activation_state = tflite::micro::GetMutableEvalInput( - context, node, kInputActivationStateTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (weights_feature->type) { - case kTfLiteFloat32: { - EvalFloatSVDF(context, node, input, weights_feature, weights_time, bias, - params, data.scratch_tensor_index, activation_state, - output); - return kTfLiteOk; - break; - } - - case kTfLiteInt8: { - EvalIntegerSvdfReference(context, node, input, weights_feature, - weights_time, bias, params, activation_state, - output, data); - return kTfLiteOk; - break; - } - - default: - TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", - TfLiteTypeGetName(weights_feature->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_SVDF() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/svdf.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/svdf.h deleted file mode 100644 index b10ede6b..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/svdf.h +++ /dev/null @@ -1,51 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_SVDF_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_SVDF_H_ - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" - -namespace tflite { - -struct OpData { - int32_t effective_scale_1_a; - int32_t effective_scale_2_a; - // b versions of each scale are kept at int since the numbers are just the - // shift value - typically between [-32, 32]. - int effective_scale_1_b; - int effective_scale_2_b; - int scratch_tensor_index; - int scratch_output_tensor_index; - - // Cached tensor zero point values for quantized operations. - int input_zero_point; - int output_zero_point; -}; - -// TensorflowLite Micro-specific reference implementation for Integer SVDF. 
-void EvalIntegerSvdfReference(TfLiteContext* context, TfLiteNode* node, - const TfLiteEvalTensor* input_tensor, - const TfLiteEvalTensor* weights_feature_tensor, - const TfLiteEvalTensor* weights_time_tensor, - const TfLiteEvalTensor* bias_tensor, - const TfLiteSVDFParams* params, - TfLiteEvalTensor* activation_state_tensor, - TfLiteEvalTensor* output_tensor, - const OpData& data); - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_KERNELS_SVDF_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/svdf_common.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/svdf_common.cc deleted file mode 100644 index dcbe02d6..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/svdf_common.cc +++ /dev/null @@ -1,177 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/activation_utils.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/svdf.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { - -void EvalIntegerSvdfReference(TfLiteContext* context, TfLiteNode* node, - const TfLiteEvalTensor* input_tensor, - const TfLiteEvalTensor* weights_feature_tensor, - const TfLiteEvalTensor* weights_time_tensor, - const TfLiteEvalTensor* bias_tensor, - const TfLiteSVDFParams* params, - TfLiteEvalTensor* activation_state_tensor, - TfLiteEvalTensor* output_tensor, - const OpData& data) { - const int n_rank = params->rank; - const int n_batch = input_tensor->dims->data[0]; - const int n_input = input_tensor->dims->data[1]; - const int n_filter = weights_feature_tensor->dims->data[0]; - const int n_unit = n_filter / n_rank; - const int n_memory = weights_time_tensor->dims->data[1]; - - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(context->GetScratchBuffer != nullptr); - - int32_t* scratch_tensor = static_cast( - context->GetScratchBuffer(context, data.scratch_tensor_index)); - int32_t* scratch_output_tensor = static_cast( - context->GetScratchBuffer(context, data.scratch_output_tensor_index)); - - // Shift states. - int16_t* const state_ptr = - tflite::micro::GetTensorData(activation_state_tensor); - - // Left shift the activation_state. 
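Aside (annotation, not part of the deleted svdf_common.cc above): the "left shift" performed just below is an element-by-element copy that drops the oldest sample from every filter's window; since the destination starts one element before the source, a forward copy such as std::copy (or memmove) does the same job. A minimal sketch with a toy int16_t buffer:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  int16_t state[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  const int total = 8;

  // Shift every element one slot toward index 0. The last slot now holds a
  // stale value, but the next cycle's activation overwrites it, which is why
  // the kernel does not bother clearing it.
  std::copy(state + 1, state + total, state);

  for (int i = 0; i < total; ++i) printf("%d ", state[i]);
  printf("\n");  // 2 3 4 5 6 7 8 8
  return 0;
}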
- { - int16_t* new_state_start = state_ptr; - const int16_t* old_state_start = state_ptr + 1; - const int16_t* old_state_end = state_ptr + n_batch * n_filter * n_memory; - while (old_state_start != old_state_end) { - *new_state_start++ = *old_state_start++; - } - } - - // Note: no need to clear the latest activation, matmul is not accumulative. - - // Feature matmul. - { - int16_t* state = - tflite::micro::GetTensorData(activation_state_tensor); - const int8_t* input = tflite::micro::GetTensorData(input_tensor); - const int8_t* weight_feature = - tflite::micro::GetTensorData(weights_feature_tensor); - const int32_t output_max = std::numeric_limits::max(); - const int32_t output_min = std::numeric_limits::min(); - int16_t* result_in_batch = state + (n_memory - 1); - for (int b = 0; b < n_batch; b++) { - const int8_t* matrix_ptr = weight_feature; - for (int r = 0; r < n_filter; r++) { - int32_t dot_prod = 0; - const int8_t* vector_in_batch = input + b * n_input; - for (int c = 0; c < n_input; c++) { - dot_prod += - *matrix_ptr++ * (*vector_in_batch++ - data.input_zero_point); - } - dot_prod = MultiplyByQuantizedMultiplier( - dot_prod, data.effective_scale_1_a, data.effective_scale_1_b); - dot_prod = std::min(std::max(output_min, dot_prod), output_max); - // This assumes state is symmetrically quantized. Otherwise last bit of - // state should be initialized to its zero point and accumulate the - // dot_prod. - // Equivalent as the following: - // result_in_batch = zero point, which happens to be zero. - // result_in_batch += dot_prod_56. - *result_in_batch = dot_prod; - result_in_batch += n_memory; - } - } - } - - // Time. - { - for (int b = 0; b < n_batch; ++b) { - int32_t* scratch_ptr_batch = scratch_tensor + b * n_filter; - - // Perform batched vector dot product: - const int16_t* vector1_ptr = - tflite::micro::GetTensorData(weights_time_tensor); - const int16_t* vector2_ptr = - tflite::micro::GetTensorData(activation_state_tensor) + - b * n_memory * n_filter; - - for (int i = 0; i < n_filter; i++) { - *scratch_ptr_batch = 0; - for (int j = 0; j < n_memory; j++) { - *scratch_ptr_batch += *vector1_ptr++ * *vector2_ptr++; - } - scratch_ptr_batch++; - } - } - } - - // Reduce, add bias, rescale, activation. - { - // Add bias. - if (bias_tensor) { - // Vector batch assign: - const int32_t* bias_data = - tflite::micro::GetTensorData(bias_tensor); - for (int i = 0; i < n_batch; ++i) { - int32_t* output_ptr = scratch_output_tensor + i * n_unit; - const int32_t* bias_ptr = bias_data; - for (int j = 0; j < n_unit; ++j) { - *output_ptr++ = *bias_ptr++; - } - } - } else { - int32_t* output_ptr = scratch_output_tensor; - for (int i = 0; i < n_batch * n_unit; ++i) { - *output_ptr++ = 0; - } - } - - // Reduce. - for (int b = 0; b < n_batch; ++b) { - int32_t* output_temp_ptr = scratch_output_tensor + b * n_unit; - int32_t* scratch_ptr_batch = scratch_tensor + b * n_filter; - - // Reduction sum vector - for (int i = 0; i < n_unit; ++i) { - for (int j = 0; j < n_rank; ++j) { - output_temp_ptr[i] += *scratch_ptr_batch++; - } - } - } - - // Rescale. 
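Aside (annotation, not part of the diff): the rescale step that follows maps each int32 accumulator back to int8 by multiplying with the effective output scale, adding the output zero point, and clamping to [-128, 127]. A simplified sketch that uses a float effective scale in place of the kernel's fixed-point (multiplier, shift) pair; the scale value below is hypothetical.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int8_t RescaleToInt8Sketch(int32_t acc, float effective_scale,
                           int32_t output_zero_point) {
  const int32_t scaled =
      static_cast<int32_t>(std::lround(acc * effective_scale));
  const int32_t shifted = scaled + output_zero_point;
  return static_cast<int8_t>(std::min(127, std::max(-128, shifted)));
}

int main() {
  printf("%d\n", RescaleToInt8Sketch(12345, 0.01f, -2));  // 121
  return 0;
}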
- const int32_t output_max = std::numeric_limits::max(); - const int32_t output_min = std::numeric_limits::min(); - for (int i = 0; i < n_batch * n_unit; ++i) { - int32_t x1 = scratch_output_tensor[i]; - int32_t x2 = MultiplyByQuantizedMultiplier(x1, data.effective_scale_2_a, - data.effective_scale_2_b); - int32_t x3 = x2 + data.output_zero_point; - int32_t x4 = std::min(std::max(output_min, x3), output_max); - tflite::micro::GetTensorData(output_tensor)[i] = - static_cast(x4); - } - } -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/tanh.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/tanh.cc deleted file mode 100644 index 7743a87f..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/tanh.cc +++ /dev/null @@ -1,158 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/tanh.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" -#include "tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace activations { -namespace { -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -struct OpData { - int32_t input_zero_point; - int32_t input_range_radius; - int32_t input_multiplier; - int input_left_shift; -}; - -void* TanhInit(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus CalculateArithmeticOpData(TfLiteContext* context, TfLiteNode* node, - OpData* data) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - - if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { - static constexpr int kInputIntegerBits = 4; - const double input_real_multiplier = - static_cast(input->params.scale) * - static_cast(1 << (31 - kInputIntegerBits)); - - const double q = std::frexp(input_real_multiplier, &data->input_left_shift); - data->input_multiplier = 
static_cast(TfLiteRound(q * (1ll << 31))); - - data->input_range_radius = - CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 31); - } - return kTfLiteOk; -} - -TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - - OpData* data = static_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - data->input_zero_point = input->params.zero_point; - return CalculateArithmeticOpData(context, node, data); -} - -} // namespace - -TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - switch (input->type) { - case kTfLiteFloat32: { - reference_ops::Tanh(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } break; - case kTfLiteInt16: { - TanhParams params; - params.input_left_shift = data.input_left_shift; - reference_ops::Tanh(params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } break; - case kTfLiteUInt8: { - TanhParams params; - params.input_zero_point = data.input_zero_point; - params.input_range_radius = data.input_range_radius; - params.input_multiplier = data.input_multiplier; - params.input_left_shift = data.input_left_shift; - reference_ops::Tanh(params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - - return kTfLiteOk; - } break; - case kTfLiteInt8: { - reference_integer_ops::Tanh( - data.input_zero_point, data.input_range_radius, data.input_multiplier, - data.input_left_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } break; - default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } -} - -} // namespace activations - -TfLiteRegistration Register_TANH() { - return {/*init=*/activations::TanhInit, - /*free=*/nullptr, - /*prepare=*/activations::TanhPrepare, - /*invoke=*/activations::TanhEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/transpose_conv.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/transpose_conv.cc deleted file mode 100644 index 9d981921..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/transpose_conv.cc +++ /dev/null @@ -1,265 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/kernels/internal/reference/transpose_conv.h" - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/common.h" -#include "tensorflow/lite/kernels/internal/quantization_util.h" -#include "tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/kernels/padding.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -constexpr int kInputTensor = 0; -constexpr int kFilterTensor = 1; -constexpr int kBiasTensor = 2; -constexpr int kOutputTensor = 0; - -// Conv is quantized along dimension 0: -// https://www.tensorflow.org/lite/performance/quantization_spec -constexpr int kConvQuantizedDimension = 0; - -struct OpData { - ConvParams params; - - // A scratch buffer is required for quantized implementations. - int scratch_buffer_index; - - // Multiplier and shift arrays are required for the int8 implementation. - int32_t* per_channel_output_multiplier; - int32_t* per_channel_output_shift; -}; - -inline PaddingType RuntimePaddingType(TfLitePadding padding) { - switch (padding) { - case TfLitePadding::kTfLitePaddingSame: - return PaddingType::kSame; - case TfLitePadding::kTfLitePaddingValid: - return PaddingType::kValid; - case TfLitePadding::kTfLitePaddingUnknown: - default: - return PaddingType::kNone; - } -} - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - const TfLiteConvParams* params, int width, - int height, int filter_width, int filter_height, - int out_width, int out_height, - const TfLiteType data_type, OpData* data) { - bool has_bias = node->inputs->size == 3; - // Check number of inputs/outputs - TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - - // Matching GetWindowedOutputSize in TensorFlow. - auto padding = params->padding; - TfLitePaddingValues padding_values = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, - params->dilation_height_factor, params->dilation_width_factor, height, - width, filter_height, filter_width, padding, &out_height, &out_width); - - data->params.padding_type = RuntimePaddingType(padding); - data->params.padding_values.width = padding_values.width; - data->params.padding_values.height = padding_values.height; - - // Note that quantized inference requires that all tensors have their - // parameters set. This is usually done during quantized training. 
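Aside (annotation, not part of the diff): those parameters are real-valued scales and zero points, and at Prepare time kernels fold the scales into fixed-point (multiplier, shift) pairs — the deleted tanh kernel above does this explicitly with std::frexp, and the deleted SVDF Prepare does the same through QuantizeMultiplier. A standalone sketch of that decomposition; the input scale below is a made-up example value.

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  const double input_scale = 0.0078125;  // hypothetical scale of 1/128
  constexpr int kInputIntegerBits = 4;   // as in the deleted tanh kernel

  const double real_multiplier =
      input_scale * static_cast<double>(1 << (31 - kInputIntegerBits));

  // Split into a mantissa q in [0.5, 1) and a power-of-two exponent, then
  // store the mantissa as a Q31 fixed-point integer.
  int left_shift = 0;
  const double q = std::frexp(real_multiplier, &left_shift);
  const int32_t multiplier =
      static_cast<int32_t>(std::llround(q * (1ll << 31)));

  // real_multiplier is approximately (multiplier / 2^31) * 2^left_shift.
  printf("multiplier=%ld left_shift=%d\n", static_cast<long>(multiplier),
         left_shift);
  return 0;
}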
- if (data_type != kTfLiteFloat32) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - TF_LITE_ENSURE(context, filter != nullptr); - const TfLiteTensor* bias = - GetOptionalInputTensor(context, node, kBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - int output_channels = filter->dims->data[kConvQuantizedDimension]; - - TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( - context, input, filter, bias, output, params->activation, - &data->params.output_multiplier, &data->params.output_shift, - &data->params.quantized_activation_min, - &data->params.quantized_activation_max, - data->per_channel_output_multiplier, - reinterpret_cast(data->per_channel_output_shift), - output_channels)); - } - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - const auto params = static_cast(node->builtin_data); - - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - TF_LITE_ENSURE(context, filter != nullptr); - - int input_width = input->dims->data[2]; - int input_height = input->dims->data[1]; - int filter_width = filter->dims->data[2]; - int filter_height = filter->dims->data[1]; - int output_width = output->dims->data[2]; - int output_height = output->dims->data[1]; - - // Dynamically allocate per-channel quantization parameters. - const int num_channels = filter->dims->data[kConvQuantizedDimension]; - data->per_channel_output_multiplier = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - data->per_channel_output_shift = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - - // Quantized kernels use an int32 scratch buffer. - if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { - TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); - TFLITE_DCHECK(context->RequestScratchBufferInArena( - context, - GetTensorShape(output).FlatSize() * sizeof(int32_t), - &(data->scratch_buffer_index)) == kTfLiteOk); - } - - // All per-channel quantized tensors need valid zero point and scale arrays. 
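Aside (annotation, not part of the diff): per-channel quantization means the filter carries one scale per output channel along dimension 0 (kConvQuantizedDimension), so each real weight is scale[channel] * q, with a zero point of 0 for symmetric int8 filters; the check that follows accepts either a single scale or exactly one scale per output channel. A tiny sketch of dequantizing such a filter, with made-up values:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical filter with 2 output channels, quantized along dimension 0.
  const std::vector<std::vector<int8_t>> filter_q = {{10, -20}, {5, 15}};
  const std::vector<float> per_channel_scale = {0.02f, 0.5f};  // one per channel

  for (size_t c = 0; c < filter_q.size(); ++c) {
    for (int8_t q : filter_q[c]) {
      // Symmetric per-channel quantization: zero point is 0 for every channel.
      printf("%.2f ", per_channel_scale[c] * q);
    }
  }
  printf("\n");  // 0.20 -0.40 2.50 7.50
  return 0;
}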
- if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, filter->quantization.type, - kTfLiteAffineQuantization); - - const auto* affine_quantization = - static_cast(filter->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - TF_LITE_ENSURE(context, affine_quantization->zero_point); - - TF_LITE_ENSURE(context, - affine_quantization->scale->size == 1 || - affine_quantization->scale->size == - filter->dims->data[kConvQuantizedDimension]); - TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, - affine_quantization->zero_point->size); - } - - TF_LITE_ENSURE_STATUS(CalculateOpData( - context, node, params, input_width, input_height, filter_width, - filter_height, output_width, output_height, input->type, data)); - - // Offsets (zero points) - data->params.input_offset = -input->params.zero_point; - data->params.weights_offset = -filter->params.zero_point; - data->params.output_offset = output->params.zero_point; - - // Stride + dilation - data->params.stride_width = params->stride_width; - data->params.stride_height = params->stride_height; - data->params.dilation_width_factor = params->dilation_width_factor; - data->params.dilation_height_factor = params->dilation_height_factor; - - float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, - &output_activation_max); - data->params.float_activation_min = output_activation_min; - data->params.float_activation_max = output_activation_max; - return kTfLiteOk; -} // namespace conv - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFilterTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 3) - ? tflite::micro::GetEvalInput(context, node, kBiasTensor) - : nullptr; - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - TF_LITE_ENSURE_EQ(context, input->type, output->type); - TF_LITE_ENSURE_MSG(context, input->type == filter->type, - "Hybrid models are not supported on TFLite Micro."); - - switch (input->type) { // Already know in/out types are same. 
- case kTfLiteFloat32: { - reference_ops::TransposeConv( - data.params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(nullptr), nullptr); - break; - } - case kTfLiteInt8: { - int32_t* scratch_buffer = static_cast( - context->GetScratchBuffer(context, data.scratch_buffer_index)); - reference_integer_ops::TransposeConv( - data.params, data.per_channel_output_multiplier, - data.per_channel_output_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer); - break; - } - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_TRANSPOSE_CONV_2D() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/unpack.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/unpack.cc deleted file mode 100644 index 557cc57a..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/kernels/unpack.cc +++ /dev/null @@ -1,121 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow/lite/c/builtin_op_data.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace unpack { -namespace { - -constexpr int kInputTensor = 0; - -template -TfLiteStatus UnpackImpl(TfLiteContext* context, TfLiteNode* node, - const TfLiteEvalTensor* input, int output_count, - int axis) { - const TfLiteEvalTensor* output0 = - tflite::micro::GetEvalOutput(context, node, 0); - const TfLiteIntArray* input_dims = input->dims; - const TfLiteIntArray* output_dims = output0->dims; - const int dimensions = input_dims->size; - - if (axis < 0) { - axis += input->dims->size; - } - - TFLITE_DCHECK_LT(axis, dimensions); - - int outer_size = 1; - for (int i = 0; i < axis; ++i) { - outer_size *= input_dims->data[i]; - } - int copy_size = 1; - for (int i = axis + 1; i < dimensions; ++i) { - copy_size *= input_dims->data[i]; - } - int output_size = 1; - for (int i = 0; i < output_dims->size; ++i) { - output_size *= output_dims->data[i]; - } - TFLITE_DCHECK_EQ(output_size, copy_size * outer_size); - - const T* input_data = tflite::micro::GetTensorData(input); - - for (int i = 0; i < output_count; ++i) { - TfLiteEvalTensor* t = tflite::micro::GetEvalOutput(context, node, i); - T* output_data = tflite::micro::GetTensorData(t); - for (int k = 0; k < outer_size; ++k) { - T* output_ptr = output_data + copy_size * k; - int loc = k * output_count * copy_size + i * copy_size; - const T* input_ptr = input_data + loc; - for (int j = 0; j < copy_size; ++j) output_ptr[j] = input_ptr[j]; - } - } - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TfLiteUnpackParams* data = - reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - - switch (input->type) { - case kTfLiteFloat32: { - return UnpackImpl(context, node, input, data->num, data->axis); - } - case kTfLiteInt32: { - return UnpackImpl(context, node, input, data->num, data->axis); - } - case kTfLiteUInt8: { - return UnpackImpl(context, node, input, data->num, data->axis); - } - case kTfLiteInt8: { - return UnpackImpl(context, node, input, data->num, data->axis); - } - default: { - TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by unpack.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - } - - return kTfLiteOk; -} -} // namespace -} // namespace unpack - -TfLiteRegistration Register_UNPACK() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/unpack::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_helpers.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_helpers.cc deleted file mode 100644 index c515c5ee..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_helpers.cc +++ /dev/null @@ -1,158 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
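Aside (annotation on the deleted unpack.cc above, not part of the diff): UnpackImpl treats the input as a flattened outer_size x output_count x copy_size block and copies one middle slice per output tensor. A toy standalone version of that indexing, for a 2x3 tensor unpacked along axis 0:

#include <cstdio>
#include <vector>

int main() {
  // Input shape {2, 3}, unpacked along axis 0 into 2 outputs of shape {3}.
  const std::vector<float> input = {1, 2, 3, 4, 5, 6};
  const int output_count = 2;  // dims[axis]
  const int outer_size = 1;    // product of dims before axis
  const int copy_size = 3;     // product of dims after axis

  std::vector<std::vector<float>> outputs(
      output_count, std::vector<float>(outer_size * copy_size));
  for (int i = 0; i < output_count; ++i) {
    for (int k = 0; k < outer_size; ++k) {
      const int loc = k * output_count * copy_size + i * copy_size;
      for (int j = 0; j < copy_size; ++j) {
        outputs[i][k * copy_size + j] = input[loc + j];
      }
    }
  }

  for (const auto& out : outputs) {
    for (float v : out) printf("%.0f ", v);
    printf("\n");  // first "1 2 3", then "4 5 6"
  }
  return 0;
}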
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/memory_helpers.h" - -#include -#include - -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/flatbuffer_conversions.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { - -uint8_t* AlignPointerUp(uint8_t* data, size_t alignment) { - std::uintptr_t data_as_uintptr_t = reinterpret_cast(data); - uint8_t* aligned_result = reinterpret_cast( - ((data_as_uintptr_t + (alignment - 1)) / alignment) * alignment); - return aligned_result; -} - -uint8_t* AlignPointerDown(uint8_t* data, size_t alignment) { - std::uintptr_t data_as_uintptr_t = reinterpret_cast(data); - uint8_t* aligned_result = - reinterpret_cast((data_as_uintptr_t / alignment) * alignment); - return aligned_result; -} - -size_t AlignSizeUp(size_t size, size_t alignment) { - size_t aligned_size = (((size + (alignment - 1)) / alignment) * alignment); - return aligned_size; -} - -TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size) { - switch (type) { - case kTfLiteFloat32: - *size = sizeof(float); - break; - case kTfLiteInt16: - *size = sizeof(int16_t); - break; - case kTfLiteInt32: - *size = sizeof(int32_t); - break; - case kTfLiteUInt8: - *size = sizeof(uint8_t); - break; - case kTfLiteInt8: - *size = sizeof(int8_t); - break; - case kTfLiteInt64: - *size = sizeof(int64_t); - break; - case kTfLiteUInt64: - *size = sizeof(uint64_t); - break; - case kTfLiteBool: - *size = sizeof(bool); - break; - case kTfLiteComplex64: - *size = sizeof(float) * 2; - break; - case kTfLiteComplex128: - *size = sizeof(double) * 2; - break; - default: - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, - size_t* bytes, size_t* type_size, - ErrorReporter* error_reporter) { - int element_count = 1; - // If flatbuffer_tensor.shape == nullptr, then flatbuffer_tensor is a scalar - // so has 1 element. - if (flatbuffer_tensor.shape() != nullptr) { - for (size_t n = 0; n < flatbuffer_tensor.shape()->Length(); ++n) { - element_count *= flatbuffer_tensor.shape()->Get(n); - } - } - - TfLiteType tf_lite_type; - TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(), - &tf_lite_type, error_reporter)); - TF_LITE_ENSURE_STATUS(TfLiteTypeSizeOf(tf_lite_type, type_size)); - *bytes = element_count * (*type_size); - return kTfLiteOk; -} - -TfLiteStatus TfLiteEvalTensorByteLength(const TfLiteEvalTensor* eval_tensor, - size_t* out_bytes) { - TFLITE_DCHECK(out_bytes != nullptr); - - int element_count = 1; - // If eval_tensor->dims == nullptr, then tensor is a scalar so has 1 element. 
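Aside (annotation, not part of the diff): the byte-length helpers in this file all reduce to "element count times element size", where a missing or empty dims list counts as a scalar with one element. A minimal sketch of that computation; the helper name is invented for the example.

#include <cstddef>
#include <cstdio>
#include <vector>

// Byte length of a tensor: product of its dimensions times the element size.
// An empty dims list is treated as a scalar, i.e. one element.
size_t TensorByteLengthSketch(const std::vector<int>& dims, size_t type_size) {
  size_t element_count = 1;
  for (int d : dims) element_count *= static_cast<size_t>(d);
  return element_count * type_size;
}

int main() {
  printf("%zu\n", TensorByteLengthSketch({2, 3, 4}, sizeof(float)));  // 96
  printf("%zu\n", TensorByteLengthSketch({}, sizeof(float)));         // 4
  return 0;
}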
- if (eval_tensor->dims != nullptr) { - for (int n = 0; n < eval_tensor->dims->size; ++n) { - element_count *= eval_tensor->dims->data[n]; - } - } - size_t type_size; - TF_LITE_ENSURE_STATUS(TfLiteTypeSizeOf(eval_tensor->type, &type_size)); - *out_bytes = element_count * type_size; - return kTfLiteOk; -} - -TfLiteStatus AllocateOutputDimensionsFromInput(TfLiteContext* context, - const TfLiteTensor* input1, - const TfLiteTensor* input2, - TfLiteTensor* output) { - const TfLiteTensor* input = nullptr; - - TF_LITE_ENSURE(context, input1->dims != nullptr); - TF_LITE_ENSURE(context, input2->dims != nullptr); - TF_LITE_ENSURE(context, output->dims->size == 0); - - input = input1->dims->size > input2->dims->size ? input1 : input2; - TF_LITE_ENSURE(context, output->type == input->type); - - size_t size = 0; - TfLiteTypeSizeOf(input->type, &size); - const int dimensions_count = tflite::GetTensorShape(input).DimensionsCount(); - for (int i = 0; i < dimensions_count; i++) { - size *= input->dims->data[i]; - } - - output->bytes = size; - - output->dims = - reinterpret_cast(context->AllocatePersistentBuffer( - context, TfLiteIntArrayGetSizeInBytes(size))); - - output->dims->size = input->dims->size; - for (int i = 0; i < dimensions_count; i++) { - output->dims->data[i] = input->dims->data[i]; - } - - return kTfLiteOk; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_helpers.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_helpers.h deleted file mode 100644 index 8f5526ce..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_helpers.h +++ /dev/null @@ -1,59 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ -#define TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ - -#include -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { - -// Returns the next pointer address aligned to the given alignment. -uint8_t* AlignPointerUp(uint8_t* data, size_t alignment); - -// Returns the previous pointer address aligned to the given alignment. -uint8_t* AlignPointerDown(uint8_t* data, size_t alignment); - -// Returns an increased size that's a multiple of alignment. -size_t AlignSizeUp(size_t size, size_t alignment); - -// Returns size in bytes for a given TfLiteType. -TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size); - -// How many bytes are needed to hold a tensor's contents. -TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, - size_t* bytes, size_t* type_size, - ErrorReporter* error_reporter); - -// How many bytes are used in a TfLiteEvalTensor instance. The byte length is -// returned in out_bytes. 
-TfLiteStatus TfLiteEvalTensorByteLength(const TfLiteEvalTensor* eval_tensor, - size_t* out_bytes); - -// Deduce output dimensions from input and allocate given size. -// Useful for operators with two inputs where the largest input should equal the -// output dimension. -TfLiteStatus AllocateOutputDimensionsFromInput(TfLiteContext* context, - const TfLiteTensor* input1, - const TfLiteTensor* input2, - TfLiteTensor* output); - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_MEMORY_HELPERS_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc deleted file mode 100644 index 39991ab7..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc +++ /dev/null @@ -1,437 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/memory_planner/greedy_memory_planner.h" - -namespace tflite { - -// Simple stable in-place sort function. Not time-efficient for large arrays. -// Would normally be in an anonymous namespace to keep it private, but we want -// to be able to test it externally. -void ReverseSortInPlace(int* values, int* ids, int size) { - bool any_swapped; - do { - any_swapped = false; - for (int i = 1; i < size; ++i) { - if (values[i - 1] < values[i]) { - const int value_temp = values[i - 1]; - values[i - 1] = values[i]; - values[i] = value_temp; - const int id_temp = ids[i - 1]; - ids[i - 1] = ids[i]; - ids[i] = id_temp; - any_swapped = true; - } - } - } while (any_swapped); -} - -GreedyMemoryPlanner::GreedyMemoryPlanner(unsigned char* scratch_buffer, - int scratch_buffer_size) - : buffer_count_(0), need_to_calculate_offsets_(true) { - // Allocate the arrays we need within the scratch buffer arena. - max_buffer_count_ = scratch_buffer_size / per_buffer_size(); - - unsigned char* next_free = scratch_buffer; - requirements_ = reinterpret_cast(next_free); - next_free += sizeof(BufferRequirements) * max_buffer_count_; - - buffer_sizes_sorted_ = reinterpret_cast(next_free); - next_free += sizeof(int) * max_buffer_count_; - - buffer_ids_sorted_ = reinterpret_cast(next_free); - next_free += sizeof(int) * max_buffer_count_; - - buffers_sorted_by_offset_ = reinterpret_cast(next_free); - next_free += sizeof(ListEntry) * max_buffer_count_; - - buffer_offsets_ = reinterpret_cast(next_free); -} - -GreedyMemoryPlanner::~GreedyMemoryPlanner() { - // We don't own the scratch buffer, so don't deallocate anything. 
-} - -TfLiteStatus GreedyMemoryPlanner::AddBuffer( - tflite::ErrorReporter* error_reporter, int size, int first_time_used, - int last_time_used) { - if (buffer_count_ >= max_buffer_count_) { - TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)", - max_buffer_count_); - return kTfLiteError; - } - BufferRequirements* current = &requirements_[buffer_count_]; - current->size = size; - current->first_time_used = first_time_used; - current->last_time_used = last_time_used; - current->offline_offset = kOnlinePlannedBuffer; - ++buffer_count_; - need_to_calculate_offsets_ = true; - return kTfLiteOk; -} - -TfLiteStatus GreedyMemoryPlanner::AddBuffer( - tflite::ErrorReporter* error_reporter, int size, int first_time_used, - int last_time_used, int offline_offset) { - BufferRequirements* current = &requirements_[buffer_count_]; - if (AddBuffer(error_reporter, size, first_time_used, last_time_used) != - kTfLiteOk) { - return kTfLiteError; - } - current->offline_offset = offline_offset; - return kTfLiteOk; -} - -bool GreedyMemoryPlanner::DoesEntryOverlapInTime( - const GreedyMemoryPlanner::ListEntry* entry, const int first_time_used, - const int last_time_used) const { - const BufferRequirements* entry_requirements = - &requirements_[entry->requirements_index]; - if (entry_requirements->first_time_used > last_time_used) { - return false; - } - if (first_time_used > entry_requirements->last_time_used) { - return false; - } - return true; -} - -GreedyMemoryPlanner::ListEntry* -GreedyMemoryPlanner::NextSimultaneouslyActiveBuffer( - const GreedyMemoryPlanner::ListEntry* start, const int first_time_used, - const int last_time_used) { - ListEntry* result = nullptr; - ListEntry* candidate_next_entry; - if (start == nullptr) { - candidate_next_entry = &buffers_sorted_by_offset_[first_entry_index_]; - } else { - if (start->next_entry_index == -1) { - return nullptr; - } - candidate_next_entry = &buffers_sorted_by_offset_[start->next_entry_index]; - } - do { - if (DoesEntryOverlapInTime(candidate_next_entry, first_time_used, - last_time_used)) { - result = candidate_next_entry; - break; - } - if (candidate_next_entry->next_entry_index == -1) { - break; - } - candidate_next_entry = - &buffers_sorted_by_offset_[candidate_next_entry->next_entry_index]; - } while (true); - return result; -} - -void GreedyMemoryPlanner::CalculateOffsetsIfNeeded() { - if (!need_to_calculate_offsets_ || (buffer_count_ == 0)) { - return; - } - need_to_calculate_offsets_ = false; - - // Start off by ordering the buffers in descending order of size. - // This helps find a more compact layout. Intuitively, you can think - // about putting the large buffers in place first, and then the - // smaller buffers can fit in the gaps, rather than fragmenting the - // gaps with small buffers at the beginning. Add offline planned offsets - // first in the list, since they have a predetermined offset. - int idx_from_tail = buffer_count_; - int idx_from_head = 0; - for (int i = 0; i < buffer_count_; ++i) { - if (requirements_[i].offline_offset == kOnlinePlannedBuffer) { - idx_from_tail--; - buffer_sizes_sorted_[idx_from_tail] = requirements_[i].size; - buffer_ids_sorted_[idx_from_tail] = i; - buffer_offsets_[i] = -1; - } else { - buffer_sizes_sorted_[idx_from_head] = requirements_[i].size; - buffer_ids_sorted_[idx_from_head] = i; - buffer_offsets_[i] = requirements_[i].offline_offset; - idx_from_head++; - } - } - - // This sorting algorithm is naive, and may end up taking a very long time - // with hundreds of buffers. 
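Aside (annotation, not part of the diff): the sort referred to in this comment is the ReverseSortInPlace routine deleted above — a stable bubble sort that orders buffer sizes largest-first while permuting the id array in lockstep, so equal-sized buffers keep their original relative order. A standalone sketch of the same routine with a small worked example:

#include <cstdio>

void ReverseSortInPlaceSketch(int* values, int* ids, int size) {
  bool any_swapped;
  do {
    any_swapped = false;
    for (int i = 1; i < size; ++i) {
      if (values[i - 1] < values[i]) {
        int tmp = values[i - 1]; values[i - 1] = values[i]; values[i] = tmp;
        tmp = ids[i - 1]; ids[i - 1] = ids[i]; ids[i] = tmp;
        any_swapped = true;
      }
    }
  } while (any_swapped);
}

int main() {
  int sizes[] = {64, 256, 64, 128};
  int ids[] = {0, 1, 2, 3};
  ReverseSortInPlaceSketch(sizes, ids, 4);
  for (int i = 0; i < 4; ++i) printf("(id=%d size=%d) ", ids[i], sizes[i]);
  printf("\n");  // (id=1 size=256) (id=3 size=128) (id=0 size=64) (id=2 size=64)
  return 0;
}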
Do not sort the offline planned offsets. - ReverseSortInPlace(&buffer_sizes_sorted_[idx_from_head], - &buffer_ids_sorted_[idx_from_head], - buffer_count_ - idx_from_head); - - // Initialize the first entry to the first buffer in - // buffer_ids_sorted_. - // - If there are no offline planned offsets, the largest buffer will be - // first, and the buffers will be handled in size order. - // - If offline offsets are present, these will be handled first in order - // for the greedy algorithm to utilized gaps in the offline plan. - first_entry_index_ = 0; - next_free_entry_ = 1; - ListEntry* first_entry = &buffers_sorted_by_offset_[first_entry_index_]; - first_entry->next_entry_index = -1; // to mark the entry as end of list - int buffer_id = buffer_ids_sorted_[0]; - first_entry->requirements_index = buffer_id; - if (requirements_[buffer_id].offline_offset == kOnlinePlannedBuffer) { - buffer_offsets_[buffer_id] = 0; - } - first_entry->offset = buffer_offsets_[buffer_id]; - - // Work through the rest of the buffers to find a good gap to place each one. - for (int i = 1; i < buffer_count_; ++i) { - // The id is the order the buffer was originally added by the client. - buffer_id = buffer_ids_sorted_[i]; - // Look at what size and time range the buffer needs to be active. - BufferRequirements* wanted_requirements = &requirements_[buffer_id]; - const int wanted_size = wanted_requirements->size; - const int wanted_first_time_used = wanted_requirements->first_time_used; - const int wanted_last_time_used = wanted_requirements->last_time_used; - - // Find the first buffer that's active in our time range. All placed - // buffers are stored in the order of their starting position in the arena - // so that it's easy to find the next buffer in memory, and so the gap. - // The candidate_entry variable holds the buffer that we're considering - // placing the current buffer after. - - int candidate_offset = 0; - // Loop through the offset-ordered list of buffers, looking for gaps. - if (wanted_requirements->offline_offset == kOnlinePlannedBuffer) { - ListEntry* prior_entry = nullptr; - while (true) { - // Find out what the next active buffer is. - ListEntry* next_entry = NextSimultaneouslyActiveBuffer( - prior_entry, wanted_first_time_used, wanted_last_time_used); - - if (prior_entry) { - BufferRequirements* candidate_requirements = - &requirements_[prior_entry->requirements_index]; - const int prior_entry_offset = - prior_entry->offset + candidate_requirements->size; - if (prior_entry_offset > candidate_offset) { - candidate_offset = prior_entry_offset; - } - } - if (next_entry == nullptr) { - // We're at the end of the list, so we can always append the buffer - // here. - break; - } - // Find out how much space there is between us and the next buffer. - const int gap = next_entry->offset - candidate_offset; - if (gap >= wanted_size) { - // This entry has a big enough gap between it and the next, so - // use it! - break; - } - // The gap wasn't big enough, so move on to another candidate. - prior_entry = next_entry; - } - } else { - // Offline planned offset are to be considered constant - candidate_offset = wanted_requirements->offline_offset; - } - // At this point, we've either found a gap (possibly at the end of the - // list) and want to place the buffer there, or there are no other active - // buffers in this time range and so we can put it at offset zero. - // Record the buffer's offset in our plan. 
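// --- Illustrative sketch (not part of the original sources) -----------------
// The loop above is a first-fit search: walk the already-placed buffers that
// are live at the same time, in offset order, and take the first gap that is
// big enough (or append after the last one). The reduced version below uses a
// std::vector instead of the planner's intrusive linked list; Placed and
// FirstFitOffset are invented names.
#include <vector>

struct Placed { int offset; int size; };

static int FirstFitOffset(const std::vector<Placed>& active_sorted_by_offset,
                          int wanted_size) {
  int candidate = 0;
  for (const Placed& placed : active_sorted_by_offset) {
    if (placed.offset - candidate >= wanted_size) {
      return candidate;  // The gap before this buffer is big enough.
    }
    if (placed.offset + placed.size > candidate) {
      candidate = placed.offset + placed.size;  // Move past this buffer.
    }
  }
  return candidate;  // No interior gap found: place after the last buffer.
}
// -----------------------------------------------------------------------------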
- buffer_offsets_[buffer_id] = candidate_offset; - // Add the newly-placed buffer to our offset-ordered list, so that - // subsequent passes can fit in their buffers around it. - ListEntry* new_entry = &buffers_sorted_by_offset_[next_free_entry_]; - new_entry->offset = candidate_offset; - new_entry->requirements_index = buffer_id; - const int new_entry_index = next_free_entry_; - ++next_free_entry_; - - if (first_entry->offset > candidate_offset) { - // The new entry offset is smaller than the first entry offset => - // replace the first entry - first_entry = new_entry; - first_entry->next_entry_index = first_entry_index_; - first_entry_index_ = new_entry_index; - } else { - ListEntry* current_entry = first_entry; - // Make sure that we insert the buffer at the correct place in the - // buffer-offset-ordered list - while (true) { - const int next_entry_index = current_entry->next_entry_index; - if (next_entry_index == -1) { - // We're at the end of the list, so just add the new entry here. - current_entry->next_entry_index = new_entry_index; - new_entry->next_entry_index = -1; - break; - } - // not at the end of the list -> take a look at next entry - ListEntry* next_entry = &buffers_sorted_by_offset_[next_entry_index]; - if (next_entry->offset > candidate_offset) { - // We're at the right spot to do an insertion and retain the sorting - // order, so place the new entry here. - new_entry->next_entry_index = current_entry->next_entry_index; - current_entry->next_entry_index = new_entry_index; - break; - } - current_entry = next_entry; - } - } - } -} - -size_t GreedyMemoryPlanner::GetMaximumMemorySize() { - CalculateOffsetsIfNeeded(); - if (buffer_count_ == 0) { - return 0; - } - ListEntry* entry = &buffers_sorted_by_offset_[first_entry_index_]; - size_t max_size = 0; - while (entry) { - BufferRequirements* requirements = - &requirements_[entry->requirements_index]; - // TODO(b/148246793): Update all size and offset variables types from - // int to size_t - const size_t current_size = entry->offset + requirements->size; - if (current_size > max_size) { - max_size = current_size; - } - if (entry->next_entry_index == -1) { - break; - } - entry = &buffers_sorted_by_offset_[entry->next_entry_index]; - } - return max_size; -} - -void GreedyMemoryPlanner::PrintMemoryPlan(ErrorReporter* error_reporter) { - CalculateOffsetsIfNeeded(); - - for (int i = 0; i < buffer_count_; ++i) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Planner buffer ID: %d, calculated offset: %d, size required: %d, " - "first_time_created: %d, " - "last_time_used: %d", - i, buffer_offsets_[i], requirements_[i].size, - requirements_[i].first_time_used, requirements_[i].last_time_used); - } - - constexpr int kLineWidth = 80; - int max_size = kLineWidth; - int max_time = 0; - for (int i = 0; i < buffer_count_; ++i) { - BufferRequirements* requirements = &requirements_[i]; - const int offset = buffer_offsets_[i]; - const int last_time_used = requirements->last_time_used; - const int size = offset + requirements->size; - if (size > max_size) { - max_size = size; - } - if (last_time_used > max_time) { - max_time = last_time_used; - } - } - - char line[kLineWidth + 1]; - for (int t = 0; t <= max_time; ++t) { - for (int c = 0; c < kLineWidth; ++c) { - line[c] = '.'; - } - for (int i = 0; i < buffer_count_; ++i) { - BufferRequirements* requirements = &requirements_[i]; - if ((t < requirements->first_time_used) || - (t > requirements->last_time_used)) { - continue; - } - const int offset = buffer_offsets_[i]; - if (offset == -1) { - 
continue; - } - const int size = requirements->size; - const int line_start = (offset * kLineWidth) / max_size; - const int line_end = ((offset + size) * kLineWidth) / max_size; - for (int n = line_start; n < line_end; ++n) { - if (line[n] == '.') { - char display; - if (i < 10) { - display = '0' + i; - } else if (i < 36) { - display = 'a' + (i - 10); - } else if (i < 62) { - display = 'A' + (i - 36); - } else { - display = '*'; - } - line[n] = display; - } else { - line[n] = '!'; - } - } - } - line[kLineWidth] = 0; - TF_LITE_REPORT_ERROR(error_reporter, "%s", (const char*)line); - } -} - -int GreedyMemoryPlanner::GetBufferCount() { return buffer_count_; } - -TfLiteStatus GreedyMemoryPlanner::GetOffsetForBuffer( - tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) { - CalculateOffsetsIfNeeded(); - if ((buffer_index < 0) || (buffer_index >= buffer_count_)) { - TF_LITE_REPORT_ERROR(error_reporter, - "buffer index %d is outside range 0 to %d", - buffer_index, buffer_count_); - return kTfLiteError; - } - *offset = buffer_offsets_[buffer_index]; - return kTfLiteOk; -} - -bool GreedyMemoryPlanner::DoAnyBuffersOverlap(ErrorReporter* error_reporter) { - CalculateOffsetsIfNeeded(); - bool were_overlaps_found = false; - for (int i = 0; i < buffer_count_; ++i) { - BufferRequirements* a_requirements = &requirements_[i]; - const int a_start_offset = buffer_offsets_[i]; - const int a_first_time_used = a_requirements->first_time_used; - const int a_last_time_used = a_requirements->last_time_used; - const int a_end_offset = a_start_offset + a_requirements->size; - for (int j = 0; j < buffer_count_; ++j) { - if (i == j) { - continue; - } - BufferRequirements* b_requirements = &requirements_[j]; - const int b_start_offset = buffer_offsets_[j]; - const int b_first_time_used = b_requirements->first_time_used; - const int b_last_time_used = b_requirements->last_time_used; - const int b_end_offset = b_start_offset + b_requirements->size; - if ((a_first_time_used > b_last_time_used) || - (b_first_time_used > a_last_time_used)) { - // Buffers don't overlap in time. - continue; - } - if ((a_start_offset >= b_end_offset) || - (b_start_offset >= a_end_offset)) { - // No overlap in memory. - continue; - } - were_overlaps_found = true; - TF_LITE_REPORT_ERROR( - error_reporter, "Overlap: %d (%d=>%d, %d->%d) vs %d (%d=>%d, %d->%d)", - i, a_first_time_used, a_last_time_used, a_start_offset, a_end_offset, - j, b_first_time_used, b_last_time_used, b_start_offset, b_end_offset); - } - } - return were_overlaps_found; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h deleted file mode 100644 index f5f26a8b..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +++ /dev/null @@ -1,163 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
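// --- Illustrative sketch (not part of the original sources) -----------------
// DoAnyBuffersOverlap() above boils down to this predicate: two planned buffers
// conflict only if their lifetimes intersect *and* their address ranges
// intersect. Plan and Conflicts below are ad-hoc names for the same per-buffer
// fields the planner tracks.
struct Plan { int offset; int size; int first_time_used; int last_time_used; };

static bool Conflicts(const Plan& a, const Plan& b) {
  const bool disjoint_in_time = (a.first_time_used > b.last_time_used) ||
                                (b.first_time_used > a.last_time_used);
  const bool disjoint_in_memory = (a.offset >= b.offset + b.size) ||
                                  (b.offset >= a.offset + a.size);
  return !disjoint_in_time && !disjoint_in_memory;
}
// -----------------------------------------------------------------------------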
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_GREEDY_MEMORY_PLANNER_H_ -#define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_GREEDY_MEMORY_PLANNER_H_ - -#include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/memory_planner/memory_planner.h" - -namespace tflite { - -constexpr int kOnlinePlannedBuffer = -1; - -// A memory planner that uses a greedy algorithm to arrange buffers in memory -// to minimize the overall arena size needed. -// -// The algorithm works like this: -// - The client enters the buffer information through AddBuffer(). -// - When a function like GetOffsetForBuffer() is called, the -// CalculateOffsetsIfNeeded() method is invoked. -// - If an up to date plan is not already present, one will be calculated. -// - The buffers are sorted in descending order of size. -// - The largest buffer is placed at offset zero. -// - The rest of the buffers are looped through in descending size order. -// - The other buffers that need to be in memory at the same time are found. -// - The first gap between simultaneously active buffers that the current -// buffer fits into will be used. -// - If no large-enough gap is found, the current buffer is placed after the -// last buffer that's simultaneously active. -// - This continues until all buffers are placed, and the offsets stored. -// -// This is not guaranteed to produce the best placement, since that's an -// NP-Complete problem, but in practice it should produce one that's decent. -class GreedyMemoryPlanner : public MemoryPlanner { - public: - // You need to pass in an area of memory to be used for planning. This memory - // needs to have a lifetime as long as the planner, but isn't owned by this - // object, so management should be handled by the client. This is so it can be - // stack or globally allocated if necessary on devices without dynamic memory - // allocation. How many buffers can be planned for will depend on the size of - // this scratch memory, so you should enlarge it if you see an error when - // calling AddBuffer(). The memory can be reused once you're done with the - // planner, as long as you copy the calculated offsets to another location. - // Each buffer requires about 36 bytes of scratch. - GreedyMemoryPlanner(unsigned char* scratch_buffer, int scratch_buffer_size); - ~GreedyMemoryPlanner() override; - - // Record details of a buffer we want to place. - TfLiteStatus AddBuffer(ErrorReporter* error_reporter, int size, - int first_time_used, int last_time_used) override; - - // Record details of an offline planned buffer offset we want to place. - // offline_offset is the buffer offset from the start of the arena. - TfLiteStatus AddBuffer(ErrorReporter* error_reporter, int size, - int first_time_used, int last_time_used, - int offline_offset); - - // Returns the high-water mark of used memory. This is the minimum size of a - // memory arena you'd need to allocate to hold these buffers. - size_t GetMaximumMemorySize() override; - - // How many buffers have been recorded. - int GetBufferCount() override; - - // Where a given buffer should be placed in the memory arena. - // This information is stored in the memory arena itself, so once the arena - // is used for inference, it will be overwritten. 
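// --- Illustrative usage sketch (not part of the original sources) -----------
// A hedged example of driving the class declared above: plan three buffers and
// read the offsets back. PlanThreeBuffers is invented for illustration, and the
// scratch size is a rough guess based on the "about 36 bytes per buffer" note
// in the constructor comment; enlarge it if AddBuffer() reports an error.
#include "tensorflow/lite/micro/memory_planner/greedy_memory_planner.h"

void PlanThreeBuffers(tflite::ErrorReporter* reporter) {
  unsigned char scratch[1024];
  tflite::GreedyMemoryPlanner planner(scratch, sizeof(scratch));
  planner.AddBuffer(reporter, /*size=*/1024, /*first_time_used=*/0, /*last_time_used=*/1);
  planner.AddBuffer(reporter, /*size=*/512, /*first_time_used=*/1, /*last_time_used=*/2);
  planner.AddBuffer(reporter, /*size=*/512, /*first_time_used=*/3, /*last_time_used=*/4);
  // The third buffer is only live after the first two have died, so the
  // planner is free to reuse their space for it.
  for (int i = 0; i < planner.GetBufferCount(); ++i) {
    int offset = 0;
    planner.GetOffsetForBuffer(reporter, i, &offset);
    // `offset` is relative to the start of whatever arena the plan is applied to.
  }
  // planner.GetMaximumMemorySize() is the arena high-water mark for this plan.
}
// -----------------------------------------------------------------------------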
- TfLiteStatus GetOffsetForBuffer(ErrorReporter* error_reporter, - int buffer_index, int* offset) override; - - // Prints an ascii-art diagram of the buffer layout plan. - void PrintMemoryPlan(ErrorReporter* error_reporter); - - // Debug method to check whether any buffer allocations are overlapping. This - // is an O(N^2) complexity operation, so only use for testing. - bool DoAnyBuffersOverlap(ErrorReporter* error_reporter); - - // Used to store a list of buffers ordered by their offset. - struct ListEntry { - int offset; - int requirements_index; - int next_entry_index; - }; - - // Number of bytes required in order to plan a buffer. - static size_t per_buffer_size() { - const int per_buffer_size = - sizeof(BufferRequirements) + // requirements_ - sizeof(int) + // buffer_sizes_sorted_ - sizeof(int) + // buffer_ids_sorted_ - sizeof(ListEntry) + // buffers_sorted_by_offset_ - sizeof(int); // buffer_offsets_; - return per_buffer_size; - } - - private: - // Whether a buffer is active in a given time range. - bool DoesEntryOverlapInTime(const ListEntry* entry, const int first_time_used, - const int last_time_used) const; - - // Walks the list to return the next buffer that is active in a given time - // range, or a null pointer if there are none. - ListEntry* NextSimultaneouslyActiveBuffer(const ListEntry* start, - const int first_time_used, - const int last_time_used); - - // If there isn't an up to date plan, calculate a new one. - void CalculateOffsetsIfNeeded(); - - // How many buffers we can plan for, based on the arena size we're given in - // the constructor. - int max_buffer_count_; - - // The number of buffers added so far. - int buffer_count_; - - // Records the client-provided information about each buffer. - struct BufferRequirements { - int size; - int offline_offset; - int first_time_used; - int last_time_used; - }; - - // Working arrays used during the layout algorithm. - BufferRequirements* requirements_; - // buffer_sizes_sorted_ and buffer_ids_sorted_ are sorted according to: - // { - // offline planned buffers, - // online planned buffers sorted by size - // } - int* buffer_sizes_sorted_; - int* buffer_ids_sorted_; - ListEntry* buffers_sorted_by_offset_; - int next_free_entry_; // Index of the next free entry of - // buffers_sorted_by_offset_ - int first_entry_index_; // Index of the first entry (smallest offset) of - // buffers_sorted_by_offset_ - - // Stores the outcome of the plan, the location of each buffer in the arena. - int* buffer_offsets_; - - // Whether buffers have been added since the last plan was calculated. - bool need_to_calculate_offsets_; - - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_GREEDY_MEMORY_PLANNER_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc deleted file mode 100644 index d25a4f22..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/memory_planner/linear_memory_planner.h" - -namespace tflite { - -LinearMemoryPlanner::LinearMemoryPlanner() - : current_buffer_count_(0), next_free_offset_(0) {} -LinearMemoryPlanner::~LinearMemoryPlanner() {} - -TfLiteStatus LinearMemoryPlanner::AddBuffer( - tflite::ErrorReporter* error_reporter, int size, int first_time_used, - int last_time_used) { - if (current_buffer_count_ >= kMaxBufferCount) { - TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)", - kMaxBufferCount); - return kTfLiteError; - } - buffer_offsets_[current_buffer_count_] = next_free_offset_; - next_free_offset_ += size; - ++current_buffer_count_; - return kTfLiteOk; -} - -size_t LinearMemoryPlanner::GetMaximumMemorySize() { return next_free_offset_; } - -int LinearMemoryPlanner::GetBufferCount() { return current_buffer_count_; } - -TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer( - tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) { - if ((buffer_index < 0) || (buffer_index >= current_buffer_count_)) { - TF_LITE_REPORT_ERROR(error_reporter, - "buffer index %d is outside range 0 to %d", - buffer_index, current_buffer_count_); - return kTfLiteError; - } - *offset = buffer_offsets_[buffer_index]; - return kTfLiteOk; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/linear_memory_planner.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/linear_memory_planner.h deleted file mode 100644 index 4d77e778..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ -#define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ - -#include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/memory_planner/memory_planner.h" - -namespace tflite { - -// The simplest possible memory planner that just lays out all buffers at -// increasing offsets without trying to reuse memory. 
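// --- Illustrative sketch (not part of the original sources) -----------------
// The linear strategy implemented above, reduced to a few lines with standard
// containers: every buffer simply gets the next free offset and nothing is ever
// reused, so the arena size is the sum of all buffer sizes. BumpPlanner is an
// invented name; it is handy as a correctness baseline for the greedy plan.
#include <cstddef>
#include <vector>

class BumpPlanner {
 public:
  int AddBuffer(int size) {
    offsets_.push_back(next_free_offset_);
    next_free_offset_ += static_cast<size_t>(size);
    return static_cast<int>(offsets_.size()) - 1;  // Index of the new buffer.
  }
  size_t GetMaximumMemorySize() const { return next_free_offset_; }
  size_t GetOffsetForBuffer(int buffer_index) const { return offsets_[buffer_index]; }

 private:
  std::vector<size_t> offsets_;
  size_t next_free_offset_ = 0;
};
// -----------------------------------------------------------------------------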
-class LinearMemoryPlanner : public MemoryPlanner { - public: - LinearMemoryPlanner(); - ~LinearMemoryPlanner() override; - - TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, int size, - int first_time_used, int last_time_used) override; - - size_t GetMaximumMemorySize() override; - int GetBufferCount() override; - TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, - int buffer_index, int* offset) override; - - private: - static constexpr int kMaxBufferCount = 1024; - size_t buffer_offsets_[kMaxBufferCount]; - int current_buffer_count_; - size_t next_free_offset_; - - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/memory_planner.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/memory_planner.h deleted file mode 100644 index 2c39fbe0..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/memory_planner/memory_planner.h +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ -#define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" - -namespace tflite { - -// Interface class for planning the layout of memory buffers during the -// execution of a graph. -// It's designed to be used by a client that iterates in any order through the -// buffers it wants to lay out, and then calls the getter functions for -// information about the calculated layout. For example: -// -// SomeMemoryPlanner planner; -// planner.AddBuffer(reporter, 100, 0, 1); // Buffer 0 -// planner.AddBuffer(reporter, 50, 2, 3); // Buffer 1 -// planner.AddBuffer(reporter, 50, 2, 3); // Buffer 2 -// -// int offset0; -// TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 0, &offset0)); -// int offset1; -// TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 1, &offset1)); -// int offset2; -// TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 2, &offset2)); -// const int arena_size_needed = planner.GetMaximumMemorySize(); -// -// The goal is for applications to be able to experiment with different layout -// strategies without changing their client code, by swapping out classes that -// implement this interface.= -class MemoryPlanner { - public: - MemoryPlanner() {} - virtual ~MemoryPlanner() {} - - // Pass information about a buffer's size and lifetime to the layout - // algorithm. The order this is called implicitly assigns an index to the - // result, so the buffer information that's passed into the N-th call of - // this method will be used as the buffer_index argument to - // GetOffsetForBuffer(). 
- virtual TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, - int size, int first_time_used, - int last_time_used) = 0; - - // The largest contiguous block of memory that's needed to hold the layout. - virtual size_t GetMaximumMemorySize() = 0; - // How many buffers have been added to the planner. - virtual int GetBufferCount() = 0; - // Calculated layout offset for the N-th buffer added to the planner. - virtual TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, - int buffer_index, int* offset) = 0; -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_allocator.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_allocator.cc deleted file mode 100644 index fb547279..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_allocator.cc +++ /dev/null @@ -1,1158 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/micro_allocator.h" - -#include -#include - -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/flatbuffer_conversions.h" -#include "tensorflow/lite/core/api/op_resolver.h" -#include "tensorflow/lite/core/api/tensor_utils.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/memory_helpers.h" -#include "tensorflow/lite/micro/memory_planner/greedy_memory_planner.h" -#include "tensorflow/lite/micro/memory_planner/memory_planner.h" -#include "tensorflow/lite/micro/micro_op_resolver.h" -#include "tensorflow/lite/micro/simple_memory_allocator.h" -#include "tensorflow/lite/schema/schema_generated.h" -#include "tensorflow/lite/schema/schema_utils.h" - -namespace tflite { - -namespace { - -// Maximum number of scratch buffer requests per operator. Operator kernels that -// request more than this value will receive an exception. -constexpr size_t kMaxScratchBuffersPerOp = 12; - -// Sentinel value used as a placeholder to mark a ScratchBufferRequest request -// needs a node id assignment. -constexpr int kUnassignedScratchBufferRequestIndex = -1; - -// Used to hold information used during allocation calculations. -struct AllocationInfo { - size_t bytes; - void** output_ptr; - int first_created; - int last_used; - int32_t offline_offset; - bool needs_allocating; -}; - -// We align tensor buffers to 16-byte boundaries, since this is a common -// requirement for SIMD extensions. 
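// --- Illustrative sketch (not part of the original sources) -----------------
// The allocator below rounds every tensor size up to kBufferAlignment and bumps
// arena pointers up to aligned addresses (it calls AlignSizeUp()/AlignPointerUp()
// from memory_helpers.h for this). The two invented helpers below,
// AlignSizeUpTo and AlignPointerUpTo, show the arithmetic involved.
#include <cstddef>
#include <cstdint>

static size_t AlignSizeUpTo(size_t size, size_t alignment) {
  return ((size + alignment - 1) / alignment) * alignment;
}

static uint8_t* AlignPointerUpTo(uint8_t* pointer, size_t alignment) {
  const uintptr_t value = reinterpret_cast<uintptr_t>(pointer);
  const uintptr_t aligned = ((value + alignment - 1) / alignment) * alignment;
  return reinterpret_cast<uint8_t*>(aligned);
}
// With a 16-byte alignment, AlignSizeUpTo(100, 16) == 112, so a 100-byte tensor
// occupies 112 bytes of arena in the plan.
// -----------------------------------------------------------------------------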
-constexpr int kBufferAlignment = 16; -constexpr char kOfflineMemAllocMetadata[] = "OfflineMemoryAllocation"; -const TfLiteIntArray kZeroLengthIntArray = {}; - -class MicroBuiltinDataAllocator : public BuiltinDataAllocator { - public: - explicit MicroBuiltinDataAllocator(SimpleMemoryAllocator* memory_allocator) - : memory_allocator_(memory_allocator) {} - - void* Allocate(size_t size, size_t alignment_hint) override { - return memory_allocator_->AllocateFromTail(size, alignment_hint); - } - void Deallocate(void* data) override { - // Do not deallocate, builtin data needs to be available for the life time - // of the model. - } - - private: - SimpleMemoryAllocator* memory_allocator_; - - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -#if !defined(__clang__) -// Helper function to check flatbuffer metadata correctness. This function is -// not called by default. Hence it's not linked in to the final binary code. -TfLiteStatus CheckOfflinePlannedOffsets(const Model* model, - ErrorReporter* error_reporter) { - // Suppress compile warning for unused function - (void)CheckOfflinePlannedOffsets; - - if (model->metadata()) { - for (size_t i = 0; i < model->metadata()->size(); ++i) { - auto metadata = model->metadata()->Get(i); - if (strncmp(metadata->name()->c_str(), kOfflineMemAllocMetadata, - strlen(kOfflineMemAllocMetadata)) == 0) { - auto* subgraphs = model->subgraphs(); - const SubGraph* subgraph = (*subgraphs)[0]; - const flatbuffers::Vector>* tensors = - subgraph->tensors(); - const flatbuffers::Vector>* buffers = - model->buffers(); - int nbr_tflite_tensors = tensors->size(); - auto* buffer = (*buffers)[metadata->buffer()]; - auto* array = buffer->data(); - const uint32_t* metadata_buffer = (uint32_t*)array->data(); - int version = metadata_buffer[0]; - int subgraph_idx = metadata_buffer[1]; - const int nbr_offline_offsets = metadata_buffer[2]; -#ifndef TF_LITE_STRIP_ERROR_STRINGS - int* offline_planner_offsets = (int*)&metadata_buffer[3]; -#endif - - TF_LITE_REPORT_ERROR(error_reporter, "==== Model metadata info: ====="); - TF_LITE_REPORT_ERROR(error_reporter, - "Offline planner metadata found, version %d, " - "subgraph %d, nbr offline offsets %d", - version, subgraph_idx, nbr_offline_offsets); - for (int j = 0; j < nbr_offline_offsets; ++j) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Offline planner tensor index %d, offline offset: %d", j, - offline_planner_offsets[j]); - } - - if (version != 1) { - TF_LITE_REPORT_ERROR(error_reporter, "Version not supported! (%d)\n", - version); - return kTfLiteError; - } - if (subgraph_idx != 0) { - TF_LITE_REPORT_ERROR(error_reporter, - "Only 1 subgraph supported! Subgraph idx (%d)\n", - subgraph_idx); - return kTfLiteError; - } - if (nbr_tflite_tensors != nbr_offline_offsets) { - TF_LITE_REPORT_ERROR(error_reporter, - "Nbr of offline buffer offsets (%d) in metadata " - "not equal nbr tensors (%d)\n", - nbr_offline_offsets, nbr_tflite_tensors); - return kTfLiteError; - } - } - } - } - return kTfLiteOk; -} -#endif - -// A helper class to construct AllocationInfo array. This array contains the -// lifetime of tensors / scratch_buffer and will be used to calculate the memory -// plan. Methods need to be called in order from `Init`, `Add*`, to `Finish`. 
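// --- Illustrative sketch (not part of the original sources) -----------------
// CheckOfflinePlannedOffsets() above reads the "OfflineMemoryAllocation"
// metadata buffer as raw 32-bit words: word 0 is the format version, word 1 the
// subgraph index, word 2 the number of offsets, and the per-tensor arena
// offsets follow (-1 means "let the runtime planner place this tensor").
// DumpOfflinePlan below is an invented helper that walks the same layout.
#include <cstdint>
#include <cstdio>

static void DumpOfflinePlan(const uint32_t* metadata_buffer, int nbr_tflite_tensors) {
  const int version = static_cast<int>(metadata_buffer[0]);
  const int subgraph_idx = static_cast<int>(metadata_buffer[1]);
  const int nbr_offline_offsets = static_cast<int>(metadata_buffer[2]);
  const int32_t* offsets = reinterpret_cast<const int32_t*>(&metadata_buffer[3]);
  std::printf("version %d, subgraph %d, %d offsets\n",
              version, subgraph_idx, nbr_offline_offsets);
  if (nbr_offline_offsets != nbr_tflite_tensors) {
    return;  // The offset count must match the model's tensor count.
  }
  for (int i = 0; i < nbr_offline_offsets; ++i) {
    std::printf("tensor %d -> offset %d\n", i, static_cast<int>(offsets[i]));
  }
}
// -----------------------------------------------------------------------------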
-class AllocationInfoBuilder { - public: - AllocationInfoBuilder(AllocationInfo* info, size_t tensor_count, - size_t scratch_buffer_count, ErrorReporter* reporter) - : info_(info), - tensor_count_(tensor_count), - buffer_count_(scratch_buffer_count), - reporter_(reporter) {} - - // Check if model contains offline planned buffer offsets. - // - If there's no metadata available, offline_planner_offsets is not set - // - If there's metadata available, offline_planner_offsets will point to the - // first offset in the metadata buffer list. - TfLiteStatus GetOfflinePlannedOffsets( - const Model* model, const int32_t** offline_planner_offsets); - - // Add allocaiton information for the tensors. - TfLiteStatus AddTensors(const SubGraph* subgraph, - const int32_t* offline_offsets, - TfLiteEvalTensor* eval_tensors); - - // Add allocation information for the scratch buffers. - TfLiteStatus AddScratchBuffers( - internal::ScratchBufferRequest* scratch_buffer_requests, - ScratchBufferHandle* scratch_buffer_handles); - - // Returns a pointer to the built AllocationInfo array. - const AllocationInfo* Finish() const { return info_; } - - private: - AllocationInfo* info_ = nullptr; - size_t tensor_count_ = 0; - size_t buffer_count_ = 0; - ErrorReporter* reporter_ = nullptr; -}; - -TfLiteStatus AllocationInfoBuilder::AddTensors(const SubGraph* subgraph, - const int32_t* offline_offsets, - TfLiteEvalTensor* eval_tensors) { - TFLITE_DCHECK(eval_tensors != nullptr); - - // Set up allocation info for all tensors. - for (size_t i = 0; i < tensor_count_; ++i) { - AllocationInfo* current = &info_[i]; - current->output_ptr = &(eval_tensors[i].data.data); - - TF_LITE_ENSURE_STATUS( - TfLiteEvalTensorByteLength(&eval_tensors[i], ¤t->bytes)); - - current->first_created = -1; - current->last_used = -1; - current->needs_allocating = (eval_tensors[i].data.data == nullptr) && - (!subgraph->tensors()->Get(i)->is_variable()); - if (offline_offsets) { - current->offline_offset = offline_offsets[i]; - } else { - current->offline_offset = kOnlinePlannedBuffer; - } - } - - for (size_t i = 0; i < subgraph->inputs()->size(); ++i) { - const int tensor_index = subgraph->inputs()->Get(i); - AllocationInfo* current = &info_[tensor_index]; - current->first_created = 0; - } - - // Mark all outputs as persistent to the end of the invocation. - for (size_t i = 0; i < subgraph->outputs()->size(); ++i) { - const int tensor_index = subgraph->outputs()->Get(i); - AllocationInfo* current = &info_[tensor_index]; - current->last_used = subgraph->operators()->size() - 1; - } - - // Figure out when the first and last use of each tensor is. - for (int i = (subgraph->operators()->size() - 1); i >= 0; --i) { - const auto* op = subgraph->operators()->Get(i); - for (size_t n = 0; n < op->inputs()->size(); ++n) { - const int tensor_index = op->inputs()->Get(n); - AllocationInfo* current = &info_[tensor_index]; - if (((current->last_used == -1) || (current->last_used < i))) { - current->last_used = i; - } - } - for (size_t n = 0; n < op->outputs()->size(); ++n) { - const int tensor_index = op->outputs()->Get(n); - AllocationInfo* current = &info_[tensor_index]; - if ((current->first_created == -1) || (current->first_created > i)) { - current->first_created = i; - } - } - } - - // Sanity check for valid tensor lifetime. - for (size_t i = 0; i < tensor_count_; ++i) { - AllocationInfo* current = &info_[i]; - // Even though tensor appears to be read only it may still need to be - // allocated. 
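// --- Illustrative sketch (not part of the original sources) -----------------
// The lifetime pass above reduces to: a tensor is first created by the earliest
// operator that lists it as an output (graph inputs count as created at step 0)
// and last used by the latest operator that lists it as an input (graph outputs
// stay live until the final step). The reduced version below uses invented
// types (Op, ComputeLifetimes) and skips the graph-input/output special cases.
#include <vector>

struct Op { std::vector<int> inputs; std::vector<int> outputs; };

static void ComputeLifetimes(const std::vector<Op>& ops, int tensor_count,
                             std::vector<int>* first_created,
                             std::vector<int>* last_used) {
  first_created->assign(tensor_count, -1);
  last_used->assign(tensor_count, -1);
  for (int i = static_cast<int>(ops.size()) - 1; i >= 0; --i) {
    for (int tensor : ops[i].inputs) {
      if ((*last_used)[tensor] == -1 || (*last_used)[tensor] < i) {
        (*last_used)[tensor] = i;
      }
    }
    for (int tensor : ops[i].outputs) {
      if ((*first_created)[tensor] == -1 || (*first_created)[tensor] > i) {
        (*first_created)[tensor] = i;
      }
    }
  }
}
// -----------------------------------------------------------------------------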
- const bool appears_read_only = - (current->first_created == -1) && (current->last_used != -1); - const bool has_partial_lifetime = - !appears_read_only && - ((current->first_created == -1) || (current->last_used == -1)); - if (has_partial_lifetime && current->needs_allocating) { - TF_LITE_REPORT_ERROR( - reporter_, - "Logic error in memory planner, tensor %d has an invalid lifetime: " - "first_created: %d, last_used: %d", - i, current->first_created, current->last_used); - return kTfLiteError; - } - } - return kTfLiteOk; -} - -// The tensor offsets will be encoded in the metadata:[Metadata] field of the -// Model. The following encoding applies: -// -// | Metadata component | Value | -// | name:string | “OfflineMemoryAllocation” | -// | buffer:unit | Index of buffer containing memory allocation data | -// -// The buffer contents for the memory allocation is a list of 32-bit integers. -// The number of tensors, n, must be equal to the number of tensors defined in -// the model. The following encoding applies: -// -// | Offset | Value | -// | 0 | Offline allocation format version – set to 0 | -// | 1 | Subgraph index to which this allocation applies | -// | 2 | Number offsets following: n | -// | 3 | Arena byte offset of tensor #0 or -1 to allocate at runtime | -// | 4 | Arena byte offset of tensor #1 or -1 to allocate at runtime | -// | 3+(n-1) | Arena byte offset of tensor #(n-1) or -1 to allocate at runtime | -TfLiteStatus AllocationInfoBuilder::GetOfflinePlannedOffsets( - const Model* model, const int32_t** offline_planner_offsets) { - if (model->metadata()) { - for (size_t i = 0; i < model->metadata()->size(); ++i) { - auto metadata = model->metadata()->Get(i); - if (strncmp(metadata->name()->c_str(), kOfflineMemAllocMetadata, - strlen(kOfflineMemAllocMetadata)) == 0) { - const flatbuffers::Vector>* buffers = - model->buffers(); - auto* buffer = (*buffers)[metadata->buffer()]; - auto* array = buffer->data(); - const uint32_t* metadata_buffer = - reinterpret_cast(array->data()); - const size_t nbr_tensors = static_cast(metadata_buffer[2]); - *offline_planner_offsets = - reinterpret_cast(&metadata_buffer[3]); - - if (tensor_count_ != nbr_tensors) { - TF_LITE_REPORT_ERROR(reporter_, - "Nbr of offline buffer offsets (%d) in metadata " - "not equal nbr tensors (%d)\n", - nbr_tensors, tensor_count_); - return kTfLiteError; - } - } - } - } - return kTfLiteOk; -} - -TfLiteStatus AllocationInfoBuilder::AddScratchBuffers( - internal::ScratchBufferRequest* scratch_buffer_requests, - ScratchBufferHandle* scratch_buffer_handles) { - // Set up allocation info for buffers. - for (size_t i = tensor_count_; i < tensor_count_ + buffer_count_; ++i) { - internal::ScratchBufferRequest* current_request = - &(scratch_buffer_requests[i - tensor_count_]); - ScratchBufferHandle* current_handle = - &(scratch_buffer_handles[i - tensor_count_]); - - AllocationInfo* current = &info_[i]; - current->output_ptr = reinterpret_cast(¤t_handle->data); - current->bytes = current_request->bytes; - current->first_created = current_request->node_idx; - current->last_used = current_request->node_idx; - current->offline_offset = kOnlinePlannedBuffer; - current->needs_allocating = true; - } - return kTfLiteOk; -} - -TfLiteStatus CreatePlan(ErrorReporter* error_reporter, - GreedyMemoryPlanner* planner, - const AllocationInfo* allocation_info, - size_t allocation_info_size) { - // Add the tensors to our allocation plan. 
- for (size_t i = 0; i < allocation_info_size; ++i) { - const AllocationInfo* current = &allocation_info[i]; - if (current->needs_allocating) { - size_t aligned_bytes_required = - AlignSizeUp(current->bytes, kBufferAlignment); - if (current->offline_offset == kOnlinePlannedBuffer) { - TF_LITE_ENSURE_STATUS( - planner->AddBuffer(error_reporter, aligned_bytes_required, - current->first_created, current->last_used)); - } else { - TF_LITE_ENSURE_STATUS(planner->AddBuffer( - error_reporter, aligned_bytes_required, current->first_created, - current->last_used, current->offline_offset)); - } - } - } - return kTfLiteOk; -} - -TfLiteStatus CommitPlan(ErrorReporter* error_reporter, MemoryPlanner* planner, - uint8_t* starting_point, - const AllocationInfo* allocation_info, - size_t allocation_info_size) { - // Figure out the actual memory addresses for each buffer, based on the plan. - int planner_index = 0; - for (size_t i = 0; i < allocation_info_size; ++i) { - const AllocationInfo* current = &allocation_info[i]; - if (current->needs_allocating) { - int offset = -1; - TF_LITE_ENSURE_STATUS( - planner->GetOffsetForBuffer(error_reporter, planner_index, &offset)); - *current->output_ptr = reinterpret_cast(starting_point + offset); - ++planner_index; - } - } - return kTfLiteOk; -} -} // namespace - -namespace internal { - -// Handles architecture safe mapping of flatbuffer vectors to a TfLite*Array -// struct. Matching types are required (e.g. float and TfLiteFloatArray). -// Big-endian systems will always allocate dimension array data in the tail -// (persistent) section. -template -TfLiteStatus FlatBufferVectorToTfLiteTypeArray( - SimpleMemoryAllocator* allocator, ErrorReporter* error_reporter, - const flatbuffers::Vector* flatbuffer_array, - kTfLiteArrayType** result) { - TFLITE_DCHECK(error_reporter != nullptr); - TFLITE_DCHECK(flatbuffer_array != nullptr); - // TODO(b/159668691): Consider adding type assertion or breaking this function - // into multiple functions for each type. std::is_same is c++11 and has a - // special updated constructor in c++17 that requires a string argument. - if (FLATBUFFERS_LITTLEENDIAN) { - // On little-endian machines, TfLite*Array happens to have the same memory - // layout as flatbuffers:Vector, so we can - // reinterpret_cast the flatbuffer vector and avoid a copy and malloc. - *result = const_cast( - reinterpret_cast(flatbuffer_array)); - } else { - // Big-endian architecture can not use the same memory layout as - // flatbuffers::Vector. Allocate from the tail and - // copy values from the flatbuffer into the newly allocated chunk. - kTfLiteArrayType* array = - reinterpret_cast(allocator->AllocateFromTail( - TfLiteIntArrayGetSizeInBytes(flatbuffer_array->Length()), - alignof(kTfLiteArrayType))); - if (array == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Failed to allocate %d bytes of memory to copy an array.", - TfLiteIntArrayGetSizeInBytes(flatbuffer_array->Length())); - return kTfLiteError; - } - array->size = flatbuffer_array->Length(); - for (int i = 0; i < array->size; ++i) { - array->data[i] = flatbuffer_array->Get(i); - } - *result = array; - } - return kTfLiteOk; -} - -// Returns a pointer to any buffer associated with the flatbuffer tensor. Can -// return nullptr if no buffer is found. -void* GetFlatbufferTensorBuffer( - const tflite::Tensor& flatbuffer_tensor, - const flatbuffers::Vector>* buffers) { - // We need to figure out where the actual contents of this tensor are stored - // in memory. 
We'll check to see if there's a serialized buffer (pretty much - // the same as a constant op in TensorFlow) associated with this tensor first, - // and if there is update the runtime structure to point to its location in - // memory. - // First see if there's any buffer information in the serialized tensor. - // TODO(b/170379532): Add better unit tests to validate flatbuffer values. - void* out_buffer = nullptr; - if (auto* buffer = (*buffers)[flatbuffer_tensor.buffer()]) { - // If we've found a buffer, does it have any data? - if (auto* array = buffer->data()) { - // If it has any data, is the data size larger than zero? - if (array->size()) { - // We've found a buffer with valid data, so update the runtime tensor - // data structure to point to it. - out_buffer = const_cast(static_cast(array->data())); - } - } - // TODO(petewarden): It's not clear in what circumstances we could have a - // buffer in the serialized tensor, but it doesn't have any data in it. Is - // that a validly-generated file, and if so what does it mean, or is it an - // error condition? It would be good to tighten up the specification to make - // it less ambiguous. - } - return out_buffer; -} - -TfLiteStatus InitializeTfLiteTensorFromFlatbuffer( - SimpleMemoryAllocator* allocator, bool allocate_temp, - const tflite::Tensor& flatbuffer_tensor, - const flatbuffers::Vector>* buffers, - ErrorReporter* error_reporter, TfLiteTensor* result) { - TFLITE_DCHECK(result != nullptr); - - *result = {}; - // Make sure the serialized type is one we know how to deal with, and convert - // it from a flatbuffer enum into a constant used by the kernel C API. - TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(), - &result->type, error_reporter)); - // Make sure we remember if the serialized tensor is designated as a variable. - result->is_variable = flatbuffer_tensor.is_variable(); - - result->data.data = GetFlatbufferTensorBuffer(flatbuffer_tensor, buffers); - - // TODO(petewarden): Some of these paths aren't getting enough testing - // coverage, so we should figure out some tests that exercise them. - if (result->data.data == nullptr) { - // The tensor contents haven't been set from a serialized buffer, so - // make a note that they will be allocated from memory. The actual - // allocation won't happen until later. - result->allocation_type = kTfLiteArenaRw; - } else { - // We set the data from a serialized buffer, so record tha. - result->allocation_type = kTfLiteMmapRo; - } - - // Figure out what the size in bytes of the buffer is and store it. - size_t type_size; - TF_LITE_ENSURE_STATUS(BytesRequiredForTensor( - flatbuffer_tensor, &result->bytes, &type_size, error_reporter)); - - if (flatbuffer_tensor.shape() == nullptr) { - // flatbuffer_tensor.shape() can return a nullptr in the case of a scalar - // tensor. - result->dims = const_cast(&kZeroLengthIntArray); - } else { - // TFLM doesn't allow reshaping the tensor which requires dynamic memory - // allocation so it is safe to drop the const qualifier. In the future, if - // we really want to update the tensor shape, we can always pass in a new - // TfLiteIntArray - especially we have to do so if the dimension is - TF_LITE_ENSURE_STATUS(FlatBufferVectorToTfLiteTypeArray( - allocator, error_reporter, flatbuffer_tensor.shape(), &(result->dims))); - } - - // Copy the quantization information from the serialized data. 
- const auto* src_quantization = flatbuffer_tensor.quantization(); - if (src_quantization && src_quantization->scale() && - (src_quantization->scale()->size() > 0) && - src_quantization->zero_point() && - (src_quantization->zero_point()->size() > 0)) { - // Always populate the TfLiteTensor.params field, even if there are - // per-channel quantization parameters. - result->params.scale = src_quantization->scale()->Get(0); - // Note that the zero_point field in the FlatBuffers schema is a 64-bit - // integer, but the zero_point field in the TfLiteQuantizationParams struct - // is a 32-bit integer. - result->params.zero_point = - static_cast(src_quantization->zero_point()->Get(0)); - - // Populate per-channel quantization params. - int channels = src_quantization->scale()->size(); - TfLiteAffineQuantization* quantization = - allocate_temp - ? reinterpret_cast( - allocator->AllocateTemp(sizeof(TfLiteAffineQuantization), - alignof(TfLiteAffineQuantization))) - : reinterpret_cast( - allocator->AllocateFromTail( - sizeof(TfLiteAffineQuantization), - alignof(TfLiteAffineQuantization))); - if (quantization == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter, - "Unable to allocate TfLiteAffineQuantization.\n"); - return kTfLiteError; - } - - // TODO(b/153688719): Reduce tail allocation by using a global zero-point - // buffer. This value can not be reused from the flatbuffer since the - // zero_point is stored as a int64_t. - quantization->zero_point = - allocate_temp - ? reinterpret_cast(allocator->AllocateTemp( - TfLiteIntArrayGetSizeInBytes(channels), - alignof(TfLiteIntArray))) - : reinterpret_cast(allocator->AllocateFromTail( - TfLiteIntArrayGetSizeInBytes(channels), - alignof(TfLiteIntArray))); - if (quantization->zero_point == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter, - "Unable to allocate quantization->zero_point.\n"); - return kTfLiteError; - } - - TF_LITE_ENSURE_STATUS(FlatBufferVectorToTfLiteTypeArray( - allocator, error_reporter, src_quantization->scale(), - &quantization->scale)); - - quantization->zero_point->size = channels; - int* zero_point_data = quantization->zero_point->data; - for (int i = 0; i < channels; i++) { - zero_point_data[i] = src_quantization->zero_point()->Get(i); - } - // TODO(rocky): Need to add a micro_allocator test case that fails when - // this is not copied: - quantization->quantized_dimension = src_quantization->quantized_dimension(); - - result->quantization = {kTfLiteAffineQuantization, quantization}; - } - return kTfLiteOk; -} - -TfLiteStatus InitializeTfLiteEvalTensorFromFlatbuffer( - SimpleMemoryAllocator* allocator, const tflite::Tensor& flatbuffer_tensor, - const flatbuffers::Vector>* buffers, - ErrorReporter* error_reporter, TfLiteEvalTensor* result) { - *result = {}; - // Make sure the serialized type is one we know how to deal with, and convert - // it from a flatbuffer enum into a constant used by the kernel C API. - TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(), - &result->type, error_reporter)); - - result->data.data = GetFlatbufferTensorBuffer(flatbuffer_tensor, buffers); - - if (flatbuffer_tensor.shape() == nullptr) { - // flatbuffer_tensor.shape() can return a nullptr in the case of a scalar - // tensor. 
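// --- Illustrative sketch (not part of the original sources) -----------------
// The scale and zero_point copied above define the standard affine quantization
// mapping: real_value is approximately scale * (quantized_value - zero_point).
// Per-channel quantization keeps one (scale, zero_point) pair per slice of the
// quantized dimension. Quantize and Dequantize below are invented int8 helpers.
#include <cmath>
#include <cstdint>

static float Dequantize(int8_t quantized, float scale, int32_t zero_point) {
  return scale * static_cast<float>(static_cast<int32_t>(quantized) - zero_point);
}

static int8_t Quantize(float real, float scale, int32_t zero_point) {
  int32_t quantized = static_cast<int32_t>(std::lround(real / scale)) + zero_point;
  if (quantized < -128) quantized = -128;
  if (quantized > 127) quantized = 127;
  return static_cast<int8_t>(quantized);
}
// -----------------------------------------------------------------------------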
- result->dims = const_cast(&kZeroLengthIntArray); - } else { - TF_LITE_ENSURE_STATUS(FlatBufferVectorToTfLiteTypeArray( - allocator, error_reporter, flatbuffer_tensor.shape(), &(result->dims))); - } - return kTfLiteOk; -} - -} // namespace internal - -MicroAllocator::MicroAllocator(SimpleMemoryAllocator* memory_allocator, - ErrorReporter* error_reporter) - : memory_allocator_(memory_allocator), - error_reporter_(error_reporter), - model_is_allocating_(false) {} - -MicroAllocator::~MicroAllocator() {} - -MicroAllocator* MicroAllocator::Create(uint8_t* tensor_arena, size_t arena_size, - ErrorReporter* error_reporter) { - uint8_t* aligned_arena = AlignPointerUp(tensor_arena, kBufferAlignment); - size_t aligned_arena_size = tensor_arena + arena_size - aligned_arena; - return Create(SimpleMemoryAllocator::Create(error_reporter, aligned_arena, - aligned_arena_size), - error_reporter); -} - -MicroAllocator* MicroAllocator::Create(SimpleMemoryAllocator* memory_allocator, - ErrorReporter* error_reporter) { - TFLITE_DCHECK(memory_allocator != nullptr); - TFLITE_DCHECK(error_reporter != nullptr); - - uint8_t* allocator_buffer = memory_allocator->AllocateFromTail( - sizeof(MicroAllocator), alignof(MicroAllocator)); - MicroAllocator* allocator = - new (allocator_buffer) MicroAllocator(memory_allocator, error_reporter); - return allocator; -} - -TfLiteStatus MicroAllocator::StartModelAllocation( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration** node_and_registrations, - TfLiteEvalTensor** eval_tensors) { - TFLITE_DCHECK(model != nullptr); - - if (model_is_allocating_) { - TF_LITE_REPORT_ERROR(error_reporter_, - "MicroAllocator: Model allocation started before " - "finishing previously allocated model"); - return kTfLiteError; - } - - model_is_allocating_ = true; - - TF_LITE_ENSURE_STATUS(InitScratchBufferData()); - TF_LITE_ENSURE_STATUS(AllocateTfLiteEvalTensors(model, eval_tensors)); - TF_LITE_ENSURE_STATUS( - AllocateNodeAndRegistrations(model, node_and_registrations)); - TF_LITE_ENSURE_STATUS(PrepareNodeAndRegistrationDataFromFlatbuffer( - model, op_resolver, *node_and_registrations)); - - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::FinishModelAllocation( - const Model* model, TfLiteEvalTensor* eval_tensors, - ScratchBufferHandle** scratch_buffer_handles) { - if (!model_is_allocating_) { - TF_LITE_REPORT_ERROR(error_reporter_, - "MicroAllocator: Model allocation finished before " - "starting allocating model"); - return kTfLiteError; - } - - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - - TF_LITE_ENSURE_STATUS(AllocateScratchBufferHandles( - scratch_buffer_handles, scratch_buffer_request_count_)); - TF_LITE_ENSURE_STATUS(CommitStaticMemoryPlan(model, subgraph, eval_tensors, - *scratch_buffer_handles)); - TF_LITE_ENSURE_STATUS(AllocateVariables(subgraph, eval_tensors)); - - model_is_allocating_ = false; - return kTfLiteOk; -} - -void* MicroAllocator::AllocatePersistentBuffer(size_t bytes) { - return memory_allocator_->AllocateFromTail(bytes, kBufferAlignment); -} - -TfLiteStatus MicroAllocator::RequestScratchBufferInArena(size_t bytes, - int* buffer_idx) { - // All scratch buffer requests are stored in the head section of the arena - // when a model is in the prepare phase. 
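// --- Illustrative sketch (not part of the original sources) -----------------
// MicroAllocator::Create() above stores the allocator object inside the very
// arena it manages: a slot is reserved at the tail and the object is built
// there with placement new. TinyAllocator and CreateInArena below are invented
// stand-ins; the sketch assumes `arena` is pointer-aligned and large enough.
#include <cstddef>
#include <cstdint>
#include <new>

struct TinyAllocator {
  uint8_t* head;
  uint8_t* tail;
};

static TinyAllocator* CreateInArena(uint8_t* arena, size_t arena_size) {
  size_t slot_offset = arena_size - sizeof(TinyAllocator);
  slot_offset -= slot_offset % alignof(TinyAllocator);  // Keep the slot aligned.
  TinyAllocator* allocator = new (arena + slot_offset) TinyAllocator();
  allocator->head = arena;                // Head allocations grow from here...
  allocator->tail = arena + slot_offset;  // ...tail allocations grow down to here.
  return allocator;
}
// -----------------------------------------------------------------------------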
First align a scratch buffer request - // pointer to the start of the head: - internal::ScratchBufferRequest* requests = GetScratchBufferRequests(); - - // Count the number of requested scratch buffers for the current node: - size_t current_node_request_count = 0; - for (size_t i = 0; i < scratch_buffer_request_count_; ++i) { - if (requests[i].node_idx == kUnassignedScratchBufferRequestIndex) { - ++current_node_request_count; - } - } - - // First, ensure that the per-kernel request has not exceeded the limit: - if (current_node_request_count >= kMaxScratchBuffersPerOp) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Scratch buffer request exeeds limit per operator (%d)", - kMaxScratchBuffersPerOp); - return kTfLiteError; - } - - // Initialize and assign values for the request at the current index: - internal::ScratchBufferRequest* current_request = - &requests[scratch_buffer_request_count_]; - *current_request = {}; - // Assign -1 as a sentinel value that will be updated when the node finishes - // allocating: - current_request->bytes = bytes; - current_request->node_idx = kUnassignedScratchBufferRequestIndex; - - // Assign the current request index to the out-param: - *buffer_idx = scratch_buffer_request_count_; - - // Bump the request count to prepare for the next request: - ++scratch_buffer_request_count_; - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::FinishPrepareNodeAllocations(int node_id) { - // When a node has finished preparing, all temp allocations performed by the - // kernel should be cleaned up: - ResetTempAllocations(); - - // Find and update any new scratch buffer requests for the current node: - internal::ScratchBufferRequest* requests = GetScratchBufferRequests(); - - for (size_t i = 0; i < scratch_buffer_request_count_; ++i) { - // A request with a node_idx of -1 is a sentinel value used to indicate this - // was a new request for the current node. The allocator finally knows the - // node index at this point. 
Assign the value and update the list of new - // requests so the head section can be adjusted to allow for the next kernel - // to allocate at most kMaxScratchBuffersPerOp requests: - if (requests[i].node_idx == kUnassignedScratchBufferRequestIndex) { - requests[i].node_idx = node_id; - } - } - - // Ensure that the head is re-adjusted to allow for another at-most - // kMaxScratchBuffersPerOp scratch buffer requests in the next operator: - TF_LITE_ENSURE_STATUS(memory_allocator_->SetHeadBufferSize( - sizeof(internal::ScratchBufferRequest) * - (scratch_buffer_request_count_ + kMaxScratchBuffersPerOp), - alignof(internal::ScratchBufferRequest))); - - return kTfLiteOk; -} - -size_t MicroAllocator::used_bytes() const { - return memory_allocator_->GetUsedBytes(); -} - -TfLiteStatus MicroAllocator::AllocateNodeAndRegistrations( - const Model* model, NodeAndRegistration** node_and_registrations) { - TFLITE_DCHECK(node_and_registrations); - - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - - NodeAndRegistration* output = reinterpret_cast( - memory_allocator_->AllocateFromTail( - sizeof(NodeAndRegistration) * subgraph->operators()->size(), - alignof(NodeAndRegistration))); - if (output == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to allocate memory for node_and_registrations."); - return kTfLiteError; - } - *node_and_registrations = output; - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::PrepareNodeAndRegistrationDataFromFlatbuffer( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration* node_and_registrations) { - TFLITE_DCHECK(model != nullptr); - TFLITE_DCHECK(node_and_registrations != nullptr); - - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - - TfLiteStatus status = kTfLiteOk; - auto* opcodes = model->operator_codes(); - MicroBuiltinDataAllocator builtin_data_allocator(memory_allocator_); - for (size_t i = 0; i < subgraph->operators()->size(); ++i) { - const auto* op = subgraph->operators()->Get(i); - const size_t index = op->opcode_index(); - if (index >= opcodes->size()) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Missing registration for opcode_index %d\n", index); - return kTfLiteError; - } - auto* opcode = (*opcodes)[index]; - status = - GetRegistrationFromOpCode(opcode, op_resolver, error_reporter_, - &(node_and_registrations[i].registration)); - if (status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to get registration from op code %s\n ", - EnumNameBuiltinOperator(GetBuiltinCode(opcode))); - return status; - } - const auto* registration = node_and_registrations[i].registration; - if (registration == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, "Skipping op for opcode_index %d\n", - index); - return kTfLiteError; - } - BuiltinOperator op_type = - static_cast(registration->builtin_code); - - const char* custom_data = nullptr; - size_t custom_data_size = 0; - unsigned char* builtin_data = nullptr; - - if (op_type == BuiltinOperator_CUSTOM) { - // Custom Ops may or may not have a non-null custom_options field. 
- if (op->custom_options() != nullptr) { - custom_data = - reinterpret_cast(op->custom_options()->data()); - custom_data_size = op->custom_options()->size(); - } - } else { - if (op->custom_options() != nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Unsupported behavior: found builtin operator %s with custom " - "options.\n", - EnumNameBuiltinOperator(op_type)); - return kTfLiteError; - } - - MicroOpResolver::BuiltinParseFunction parser = - op_resolver.GetOpDataParser(op_type); - if (parser == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, "Did not find a parser for %s", - EnumNameBuiltinOperator(op_type)); - - return kTfLiteError; - } - TF_LITE_ENSURE_STATUS(parser(op, error_reporter_, &builtin_data_allocator, - (void**)(&builtin_data))); - } - - TfLiteIntArray* inputs_array; - TF_LITE_ENSURE_STATUS(internal::FlatBufferVectorToTfLiteTypeArray( - memory_allocator_, error_reporter_, op->inputs(), &inputs_array)); - - TfLiteIntArray* outputs_array; - TF_LITE_ENSURE_STATUS(internal::FlatBufferVectorToTfLiteTypeArray( - memory_allocator_, error_reporter_, op->outputs(), &outputs_array)); - - TfLiteNode* node = &(node_and_registrations[i].node); - *node = {}; - node->inputs = inputs_array; - node->outputs = outputs_array; - node->builtin_data = reinterpret_cast(builtin_data); - node->custom_initial_data = custom_data; - node->custom_initial_data_size = custom_data_size; - } - - return kTfLiteOk; -} - -TfLiteTensor* MicroAllocator::AllocatePersistentTfLiteTensor( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index) { - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - - // This value is allocated from persistent arena space. It is guaranteed to be - // around for the lifetime of the application. - TfLiteTensor* tensor = - AllocatePersistentTfLiteTensorInternal(model, eval_tensors, tensor_index); - - // Populate any fields from the flatbuffer, since this TfLiteTensor struct is - // allocated in the persistent section of the arena, ensure that additional - // allocations also take place in that section of the arena. - if (PopulateTfLiteTensorFromFlatbuffer(model, subgraph, tensor, tensor_index, - /*allocate_temp=*/false) != - kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to populate a persistent TfLiteTensor struct " - "from flatbuffer data!"); - return nullptr; - } - - if (eval_tensors != nullptr) { - // Tensor buffers that are allocated at runtime (e.g. non-weight buffers) - // and not located in the flatbuffer are stored on the pre-allocated list of - // TfLiteEvalTensors structs. These structs are the source of truth, simply - // point the corresponding buffer to the new TfLiteTensor data value. - tensor->data.data = eval_tensors[tensor_index].data.data; - } - return tensor; -} - -TfLiteTensor* MicroAllocator::AllocateTempTfLiteTensor( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index) { - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - - // This value is allocated from temporary arena space. It is guaranteed to be - // around for at least the scope of the calling function. Since this struct - // allocation takes place in temp space, no need to own or cleanup. 
- TfLiteTensor* tensor = - reinterpret_cast(memory_allocator_->AllocateTemp( - sizeof(TfLiteTensor), alignof(TfLiteTensor))); - - // Populate any fields from the flatbuffer, since this TfLiteTensor struct is - // allocated in the temp section of the arena, ensure that additional - // allocations also take place in that section of the arena. - if (PopulateTfLiteTensorFromFlatbuffer(model, subgraph, tensor, tensor_index, - /*allocate_temp=*/true) != kTfLiteOk) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to populate a temp TfLiteTensor struct from flatbuffer data!"); - return nullptr; - } - - if (eval_tensors != nullptr) { - // Tensor buffers that are allocated at runtime (e.g. non-weight buffers) - // and not located in the flatbuffer are stored on the pre-allocated list of - // TfLiteEvalTensors structs. These structs are the source of truth, simply - // point the corresponding buffer to the new TfLiteTensor data value. - tensor->data.data = eval_tensors[tensor_index].data.data; - } - return tensor; -} - -void MicroAllocator::ResetTempAllocations() { - memory_allocator_->ResetTempAllocations(); -} - -TfLiteStatus MicroAllocator::AllocateTfLiteEvalTensors( - const Model* model, TfLiteEvalTensor** eval_tensors) { - TFLITE_DCHECK(eval_tensors != nullptr); - - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - - size_t alloc_count = subgraph->tensors()->size(); - TfLiteEvalTensor* tensors = - reinterpret_cast(memory_allocator_->AllocateFromTail( - sizeof(TfLiteEvalTensor) * alloc_count, alignof(TfLiteEvalTensor))); - if (tensors == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to allocate memory for context->eval_tensors, " - "%d bytes required", - sizeof(TfLiteEvalTensor) * alloc_count); - return kTfLiteError; - } - - for (size_t i = 0; i < alloc_count; ++i) { - TfLiteStatus status = internal::InitializeTfLiteEvalTensorFromFlatbuffer( - memory_allocator_, *subgraph->tensors()->Get(i), model->buffers(), - error_reporter_, &tensors[i]); - if (status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, "Failed to initialize tensor %d", - i); - return kTfLiteError; - } - } - *eval_tensors = tensors; - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::AllocateVariables(const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors) { - for (size_t i = 0; i < subgraph->tensors()->size(); ++i) { - auto* tensor = subgraph->tensors()->Get(i); - if (tensor->is_variable()) { - size_t buffer_size; - TF_LITE_ENSURE_STATUS( - TfLiteEvalTensorByteLength(&eval_tensors[i], &buffer_size)); - - eval_tensors[i].data.data = - memory_allocator_->AllocateFromTail(buffer_size, kBufferAlignment); - - if (eval_tensors[i].data.data == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to allocate variable tensor of size %d", - buffer_size); - return kTfLiteError; - } - } - } - return kTfLiteOk; -} - -TfLiteTensor* MicroAllocator::AllocatePersistentTfLiteTensorInternal( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index) { - return reinterpret_cast(memory_allocator_->AllocateFromTail( - sizeof(TfLiteTensor), alignof(TfLiteTensor))); -} - -TfLiteStatus MicroAllocator::PopulateTfLiteTensorFromFlatbuffer( - const Model* model, const SubGraph* subgraph, TfLiteTensor* tensor, - int tensor_index, bool allocate_temp) { - // TODO(b/162311891): This method serves as a stub to ensure quantized - // allocations in the tail can be recorded. 
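The temp/persistent split shown here is what kernels see through the TfLiteContext accessors the interpreter wires up further below: GetTensor hands back one of these short-lived TfLiteTensor structs, while GetEvalTensor returns the long-lived TfLiteEvalTensor. A sketch of both, assuming a node with at least one input and one output:

    #include "tensorflow/lite/c/common.h"

    TfLiteStatus MyOpPrepare(TfLiteContext* context, TfLiteNode* node) {
      // Temp struct: full metadata (type, dims, quantization), but only valid
      // until the interpreter calls ResetTempAllocations().
      TfLiteTensor* input = context->GetTensor(context, node->inputs->data[0]);
      TF_LITE_ENSURE(context, input != nullptr);
      TF_LITE_ENSURE_EQ(context, input->type, kTfLiteInt8);
      return kTfLiteOk;
    }

    TfLiteStatus MyOpEval(TfLiteContext* context, TfLiteNode* node) {
      // Eval struct: lives for the whole model lifetime, safe to use in Invoke().
      const TfLiteEvalTensor* input =
          context->GetEvalTensor(context, node->inputs->data[0]);
      TfLiteEvalTensor* output =
          context->GetEvalTensor(context, node->outputs->data[0]);
      (void)input;
      (void)output;
      return kTfLiteOk;
    }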
Once the interpreter has APIs for - // accessing buffers on TfLiteEvalTensor this method can be dropped. - return internal::InitializeTfLiteTensorFromFlatbuffer( - memory_allocator_, allocate_temp, *subgraph->tensors()->Get(tensor_index), - model->buffers(), error_reporter_, tensor); -} - -ErrorReporter* MicroAllocator::error_reporter() const { - return error_reporter_; -} - -const SubGraph* MicroAllocator::GetSubGraphFromModel(const Model* model) { - auto* subgraphs = model->subgraphs(); - if (subgraphs->size() != 1) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Only 1 subgraph is currently supported.\n"); - return nullptr; - } - return (*subgraphs)[0]; -} - -TfLiteStatus MicroAllocator::CommitStaticMemoryPlan( - const Model* model, const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors, - ScratchBufferHandle* scratch_buffer_handles) { - size_t head_usage = 0; - // Create static memory plan - // 1. Calculate AllocationInfo to know the lifetime of each tensor/buffer. - // 2. Add them into the planner (such as the GreedyMemoryPlanner). - // 3. Static memory planning using the planner. - // 4. Set tensor/buffer pointers based on the offsets from the previous step. - // - // Note that AllocationInfo is only needed for creating the plan. It will be - // allocated from the temp section and cleaned up at the bottom of this - // function. - - size_t allocation_info_count = - subgraph->tensors()->size() + scratch_buffer_request_count_; - size_t bytes = sizeof(AllocationInfo) * allocation_info_count; - - // Allocate an array of AllocationInfo structs from the temp section. This - // struct will be used by AllocationInfoBuilder to find buffer usage. - AllocationInfo* allocation_info = reinterpret_cast( - memory_allocator_->AllocateTemp(bytes, alignof(AllocationInfo))); - if (allocation_info == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to allocate memory for allocation_info, %d bytes required", - bytes); - return kTfLiteError; - } - - // Use the AllocationInfoBuilder class to help determine where buffers are - // used in the subgraph. - AllocationInfoBuilder builder(allocation_info, subgraph->tensors()->size(), - scratch_buffer_request_count_, error_reporter_); - - const int32_t* offline_planner_offsets = nullptr; - TF_LITE_ENSURE_STATUS( - builder.GetOfflinePlannedOffsets(model, &offline_planner_offsets)); - TF_LITE_ENSURE_STATUS( - builder.AddTensors(subgraph, offline_planner_offsets, eval_tensors)); - - internal::ScratchBufferRequest* scratch_buffer_requests = - GetScratchBufferRequests(); - - TF_LITE_ENSURE_STATUS(builder.AddScratchBuffers(scratch_buffer_requests, - scratch_buffer_handles)); - - // Remaining arena size that memory planner can use for calculating offsets. - size_t remaining_arena_size = - memory_allocator_->GetAvailableMemory(kBufferAlignment); - uint8_t* planner_arena = - memory_allocator_->AllocateTemp(remaining_arena_size, kBufferAlignment); - TF_LITE_ENSURE(error_reporter_, planner_arena != nullptr); - GreedyMemoryPlanner planner(planner_arena, remaining_arena_size); - TF_LITE_ENSURE_STATUS(CreatePlan(error_reporter_, &planner, allocation_info, - allocation_info_count)); - - // Reset all temp allocations used above: - memory_allocator_->ResetTempAllocations(); - - size_t actual_available_arena_size = - memory_allocator_->GetAvailableMemory(kBufferAlignment); - - // Make sure we have enough arena size. 
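Because GetSubGraphFromModel() rejects anything other than a single subgraph, it pays to validate a model before the allocator ever sees it. A sketch of the usual pre-flight check; g_model stands in for the application's model flatbuffer array:

    #include "tensorflow/lite/core/api/error_reporter.h"
    #include "tensorflow/lite/schema/schema_generated.h"
    #include "tensorflow/lite/version.h"

    bool ModelLooksUsable(const unsigned char* g_model,
                          tflite::ErrorReporter* error_reporter) {
      const tflite::Model* model = tflite::GetModel(g_model);
      if (model->version() != TFLITE_SCHEMA_VERSION) {
        TF_LITE_REPORT_ERROR(error_reporter,
                             "Model schema version %d, supported version is %d.",
                             static_cast<int>(model->version()), TFLITE_SCHEMA_VERSION);
        return false;
      }
      if (model->subgraphs()->size() != 1) {
        TF_LITE_REPORT_ERROR(error_reporter,
                             "Only single-subgraph models are supported.");
        return false;
      }
      return true;
    }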
- if (planner.GetMaximumMemorySize() > actual_available_arena_size) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Arena size is too small for all buffers. Needed %u but only " - "%u was available.", - planner.GetMaximumMemorySize(), actual_available_arena_size); - return kTfLiteError; - } - // Commit the plan. - TF_LITE_ENSURE_STATUS(CommitPlan(error_reporter_, &planner, - memory_allocator_->GetHeadBuffer(), - allocation_info, allocation_info_count)); - head_usage = planner.GetMaximumMemorySize(); - - // The head is used to store memory plans for one model at a time during the - // model preparation stage, and is re-purposed to store scratch buffer handles - // during model invocation. The head must be as large as the greater of the - // largest model memory plan's size and the total space required for all - // scratch buffer handles. - if (max_head_buffer_usage_ < head_usage) { - max_head_buffer_usage_ = head_usage; - } - - // The head is used for storing scratch buffer allocations before finalizing a - // memory plan in this function. Ensure that the head is set to the largest - // memory plan sent through the allocator: - TF_LITE_ENSURE_STATUS(memory_allocator_->SetHeadBufferSize( - max_head_buffer_usage_, kBufferAlignment)); - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::AllocateScratchBufferHandles( - ScratchBufferHandle** scratch_buffer_handles, size_t handle_count) { - TFLITE_DCHECK(scratch_buffer_handles != nullptr); - - if (scratch_buffer_request_count_ == 0) { - // No scratch buffer requests were requested during model allocation. - return kTfLiteOk; - } - - // Allocate a consecutive block of memory store the scratch buffer handles. - // This alignment ensures quick lookup during inference time for the model: - *scratch_buffer_handles = reinterpret_cast( - memory_allocator_->AllocateFromTail( - sizeof(ScratchBufferHandle) * handle_count, - alignof(ScratchBufferHandle))); - - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::InitScratchBufferData() { - // A model is preparing to allocate resources, ensure that scratch buffer - // request counter is cleared: - scratch_buffer_request_count_ = 0; - - // All requests will be stored in the head section. Each kernel is allowed at - // most kMaxScratchBuffersPerOp requests. Adjust the head to reserve at most - // that many requests to begin: - TF_LITE_ENSURE_STATUS(memory_allocator_->SetHeadBufferSize( - sizeof(internal::ScratchBufferRequest) * kMaxScratchBuffersPerOp, - alignof(internal::ScratchBufferRequest))); - - return kTfLiteOk; -} - -internal::ScratchBufferRequest* MicroAllocator::GetScratchBufferRequests() { - return reinterpret_cast( - AlignPointerUp(memory_allocator_->GetHeadBuffer(), - alignof(internal::ScratchBufferRequest))); -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_allocator.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_allocator.h deleted file mode 100644 index 39a12eaf..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_allocator.h +++ /dev/null @@ -1,279 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
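The "Arena size is too small" check above is the failure integrators hit most often. The practical sizing loop is to start from a deliberately generous arena, let allocation succeed once, and read back the real requirement through MicroInterpreter::arena_used_bytes(); kArenaSize and the interpreter/error_reporter objects in this sketch are placeholders.

    constexpr size_t kArenaSize = 64 * 1024;  // generous first guess
    alignas(16) static uint8_t tensor_arena[kArenaSize];

    void ReportArenaRequirement(tflite::MicroInterpreter& interpreter,
                                tflite::ErrorReporter* error_reporter) {
      if (interpreter.AllocateTensors() != kTfLiteOk) {
        TF_LITE_REPORT_ERROR(error_reporter, "Arena too small, grow kArenaSize.");
        return;
      }
      // Optimal size for this model; the interpreter header notes that ~16 extra
      // bytes may be needed if the arena start is not 16-byte aligned.
      TF_LITE_REPORT_ERROR(error_reporter, "Arena needs %d bytes.",
                           static_cast<int>(interpreter.arena_used_bytes()));
    }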
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_ - -#include -#include - -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/micro_op_resolver.h" -#include "tensorflow/lite/micro/simple_memory_allocator.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { - -namespace internal { - -// Sets up all of the data structure members for a TfLiteTensor based on the -// contents of a serialized tensor in the flatbuffer. -// TODO(b/162311891): Drop this method when the interpreter has an API for -// returning buffers on TfLiteEvalTensor. -TfLiteStatus InitializeTfLiteTensorFromFlatbuffer( - SimpleMemoryAllocator* allocator, bool allocate_temp, - const tflite::Tensor& flatbuffer_tensor, - const flatbuffers::Vector>* buffers, - ErrorReporter* error_reporter, TfLiteTensor* result); - -// Holds placeholder information for a scratch buffer request from a kernel. -// This struct is only used during the model prepare stage. Each request from a -// kernel is stored in the head section. During the prepare stage, the head -// section will at least hold kMaxScratchBuffersPerOp number of requests plus -// any requests from previous kernel requests. -// -// When the memory plan is finalized, these structs are no longer used in favor -// of a sequential, array of ScratchBufferHandle allocations in the tail -// section. These allocations are indexed by the request API defined in the -// TfLiteContext struct. -typedef struct { - // Number of bytes required by the buffer. The actual allocated size might be - // greater than `bytes` due to buffer alignment. - size_t bytes; - // Node where the buffer is allocated for. This provides useful information to - // determine the lifetime of the buffer. In AllocationInfo, this buffer will - // have `before` = node_idx and `after` = node_idx. - int node_idx; -} ScratchBufferRequest; - -} // namespace internal - -typedef struct { - TfLiteNode node; - const TfLiteRegistration* registration; -} NodeAndRegistration; - -// Holds a pointer to a buffer for a scratch buffer requested by a kernel during -// the model prepare stage. This struct is allocated in-place and allows for -// quick pointer-indexed lookup for speed during model inference. -typedef struct { - // Pointer to location of the scratch buffer: - uint8_t* data; -} ScratchBufferHandle; - -// Allocator responsible for allocating memory for all intermediate tensors -// necessary to invoke a model. -// -// The lifetime of the model, tensor arena and error reporter must be at -// least as long as that of the allocator object, since the allocator needs -// them to be accessible during its entire lifetime. -// -// The MicroAllocator simply plans out additional allocations that are required -// to standup a model for inference in TF Micro. 
This class currently relies on -// an additional allocator - SimpleMemoryAllocator - for all allocations from an -// arena. These allocations are divided into head (non-persistent) and tail -// (persistent) regions: -// -// Memory layout to help understand how it works -// This information could change in the future version. -// ************** .memory_allocator->GetBuffer() -// Tensors/Scratch buffers (head) -// ************** .head_watermark -// unused memory -// ************** .memory_allocator->GetBuffer() + ->GetMaxBufferSize() -// - ->GetDataSize() -// persistent area (tail) -// ************** .memory_allocator->GetBuffer() + ->GetMaxBufferSize() -class MicroAllocator { - public: - // Creates a MicroAllocator instance from a given tensor arena. This arena - // will be managed by the created instance. - // Note: Please use __declspec(align(16)) to make sure tensor_arena is 16 - // bytes aligned, otherwise some head room will be wasted. - // TODO(b/157615197): Cleanup constructor + factory usage. - static MicroAllocator* Create(uint8_t* tensor_arena, size_t arena_size, - ErrorReporter* error_reporter); - - // Creates a MicroAllocator instance using the provided SimpleMemoryAllocator - // intance. This allocator instance will use the SimpleMemoryAllocator - // instance to manage allocations internally. - static MicroAllocator* Create(SimpleMemoryAllocator* memory_allocator, - ErrorReporter* error_reporter); - - // Begin allocating internal resources required for model inference. - // This method will run through the flatbuffer data supplied in the model to - // properly allocate tensor, node, and op registration data. This method is - // expected to be followed with a call to FinishModelAllocation() before - // resuming allocation with another model. All persistent tensor buffers are - // stored in the out-param eval_tensors. This value is allocated from the - // persistent memory arena and will be used to host runtime tensor buffers. - TfLiteStatus StartModelAllocation( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration** node_and_registrations, - TfLiteEvalTensor** eval_tensors); - - // Finish allocating internal resources required for model inference. - // This method will plan non-persistent buffers and commit a memory plan to - // the 'head' section of the memory arena. All variable tensor data will also - // be allocated. This method should be called after assigning model resources - // in StartModelAllocation(). The eval_tensors pointer should be the value - // passed into this class during StartModelAllocation(). Scratch buffer - // handles are stored in the out-param `scratch_buffer_handles`. This value - // will be used in `GetScratchBuffer` call to retrieve scratch buffers. - TfLiteStatus FinishModelAllocation( - const Model* model, TfLiteEvalTensor* eval_tensors, - ScratchBufferHandle** scratch_buffer_handles); - - // Allocates a TfLiteTensor struct and populates the returned value with - // properties from the model flatbuffer. This struct is allocated from - // persistent arena memory is only guaranteed for the lifetime of the - // application. The eval_tensors pointer should be the value passed into this - // class during StartModelAllocation() and contains the source-of-truth for - // buffers. - virtual TfLiteTensor* AllocatePersistentTfLiteTensor( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index); - - // Allocates a TfLiteTensor struct and populates the returned value with - // properties from the model flatbuffer. 
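On GCC/Clang toolchains the 16-byte alignment asked for above is normally expressed with alignas rather than __declspec. A sketch of standing the allocator up explicitly; most applications skip this and let MicroInterpreter call Create() internally, so the explicit form mainly matters when one allocator is shared between interpreters or wrapped for recording.

    #include "tensorflow/lite/micro/micro_allocator.h"
    #include "tensorflow/lite/micro/micro_error_reporter.h"

    alignas(16) static uint8_t tensor_arena[16 * 1024];

    static tflite::MicroErrorReporter micro_error_reporter;
    static tflite::MicroAllocator* allocator = tflite::MicroAllocator::Create(
        tensor_arena, sizeof(tensor_arena), &micro_error_reporter);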
This struct is allocated from - // temporary arena memory is only guaranteed until a call is made to - // ResetTempAllocations(). The eval_tensors pointer should be the value passed - // into this class during StartModelAllocation() and contains the - // source-of-truth for buffers. - virtual TfLiteTensor* AllocateTempTfLiteTensor(const Model* model, - TfLiteEvalTensor* eval_tensors, - int tensor_index); - - // Resets all temporary allocations. This method should be called after a - // chain of temp allocations (e.g. chain of TfLiteTensor objects via - // AllocateTfLiteTensor()). - virtual void ResetTempAllocations(); - - // Allocates persistent buffer which has the same life time as the allocator. - // The memory is immediately available and is allocated from the tail of the - // arena. - virtual void* AllocatePersistentBuffer(size_t bytes); - - // Register a scratch buffer of size `bytes` for Node with `node_id`. - // This method only requests a buffer with a given size to be used after a - // model has finished allocation via FinishModelAllocation(). All requested - // buffers will be accessible by the out-param in that method. - TfLiteStatus RequestScratchBufferInArena(size_t bytes, int* buffer_idx); - - // Finish allocating a specific NodeAndRegistration prepare block (kernel - // entry for a model) with a given node ID. This call ensures that any scratch - // buffer requests and temporary allocations are handled and ready for the - // next node prepare block. - TfLiteStatus FinishPrepareNodeAllocations(int node_id); - - // Returns the arena usage in bytes, only available after - // `FinishModelAllocation`. Otherwise, it will return 0. - size_t used_bytes() const; - - protected: - MicroAllocator(SimpleMemoryAllocator* memory_allocator, - ErrorReporter* error_reporter); - virtual ~MicroAllocator(); - - // Allocates an array in the arena to hold pointers to the node and - // registration pointers required to represent the inference graph of the - // model. - virtual TfLiteStatus AllocateNodeAndRegistrations( - const Model* model, NodeAndRegistration** node_and_registrations); - - // Populates node and registration pointers representing the inference graph - // of the model from values inside the flatbuffer (loaded from the TfLiteModel - // instance). Persistent data (e.g. operator data) is allocated from the - // arena. - virtual TfLiteStatus PrepareNodeAndRegistrationDataFromFlatbuffer( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration* node_and_registrations); - - // Allocates the list of persistent TfLiteEvalTensors that are used for the - // "eval" phase of model inference. These structs will be the source of truth - // for all tensor buffers. Allocation results are stored in the out-param - // eval_tensors. - virtual TfLiteStatus AllocateTfLiteEvalTensors( - const Model* model, TfLiteEvalTensor** eval_tensors); - - // Allocates persistent tensor buffers for variable tensors in the subgraph. - virtual TfLiteStatus AllocateVariables(const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors); - - // Allocate and return a persistent TfLiteTensor. - // TODO(b/162311891): Drop this method when the interpreter has an API for - // accessing TfLiteEvalTensor structs. - virtual TfLiteTensor* AllocatePersistentTfLiteTensorInternal( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index); - - // Populates a TfLiteTensor struct with data from the model flatbuffer. 
Any - // quantization data is allocated from either the tail (persistent) or temp - // sections of the arena based on the allocation flag. - virtual TfLiteStatus PopulateTfLiteTensorFromFlatbuffer( - const Model* model, const SubGraph* subgraph, TfLiteTensor* tensor, - int tensor_index, bool allocate_temp); - - ErrorReporter* error_reporter() const; - - // Returns the first subgraph from the model. - const SubGraph* GetSubGraphFromModel(const Model* model); - - private: - // Commits a memory plan for all non-persistent buffer allocations in the - // 'head' section of the memory arena. The eval_tensors pointer is the list of - // pre-allocated TfLiteEvalTensor structs that will point to the buffers that - // will be allocated into the head section in this function call. The - // scratch_buffer_handles pointer is the array of pre-allocated - // ScratchBufferHandle structs that will point to allocated buffers also in - // the head section. - virtual TfLiteStatus CommitStaticMemoryPlan( - const Model* model, const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors, - ScratchBufferHandle* scratch_buffer_handles); - - // Allocates an array of ScratchBufferHandle structs in the tail section for a - // given number of handles. - virtual TfLiteStatus AllocateScratchBufferHandles( - ScratchBufferHandle** scratch_buffer_handles, size_t handle_count); - - // Clears all internal scratch buffer request counts and resets the head to - // prepare for kernels to request scratch buffer data when a model is - // preparing. - TfLiteStatus InitScratchBufferData(); - - // Returns the pointer for the array of ScratchBufferRequest allocations in - // the head section. - internal::ScratchBufferRequest* GetScratchBufferRequests(); - - // A simple memory allocator that always allocate from the arena tail or head. - SimpleMemoryAllocator* memory_allocator_; - - ErrorReporter* error_reporter_; - bool model_is_allocating_; - - // Holds the number of ScratchBufferRequest instances stored in the head - // section when a model is allocating. - size_t scratch_buffer_request_count_ = 0; - - // Holds the byte length of the memory plan with the largest head usage. Used - // to ensure that multi-tenant allocations can share the head for buffers. - size_t max_head_buffer_usage_ = 0; - - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -} // namespace tflite -#endif // TENSORFLOW_LITE_MICRO_MICRO_ALLOCATOR_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_error_reporter.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_error_reporter.cc deleted file mode 100644 index 6d8361cd..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_error_reporter.cc +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
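max_head_buffer_usage_ is what allows several models to share one arena: the head is grown to the largest committed plan, while each model keeps its own tail allocations. A sketch of that multi-tenant setup, used sequentially rather than concurrently; g_model_a / g_model_b, the two resolvers and the arena/error-reporter globals are placeholders:

    void RunBothModelsOnce() {
      tflite::MicroAllocator* shared_allocator = tflite::MicroAllocator::Create(
          tensor_arena, sizeof(tensor_arena), &micro_error_reporter);

      tflite::MicroInterpreter interpreter_a(tflite::GetModel(g_model_a), resolver_a,
                                             shared_allocator, &micro_error_reporter);
      tflite::MicroInterpreter interpreter_b(tflite::GetModel(g_model_b), resolver_b,
                                             shared_allocator, &micro_error_reporter);

      // Both memory plans are committed into the same head region; its final
      // size is the larger of the two plans.
      interpreter_a.AllocateTensors();
      interpreter_b.AllocateTensors();

      interpreter_a.Invoke();  // one model invoked at a time
      interpreter_b.Invoke();
    }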
-==============================================================================*/ - -#include "tensorflow/lite/micro/micro_error_reporter.h" - -#include - -#ifndef TF_LITE_STRIP_ERROR_STRINGS -#include "tensorflow/lite/micro/debug_log.h" -#include "tensorflow/lite/micro/micro_string.h" -#endif - -namespace tflite { - -int MicroErrorReporter::Report(const char* format, va_list args) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - // Only pulling in the implementation of this function for builds where we - // expect to make use of it to be extra cautious about not increasing the code - // size. - static constexpr int kMaxLogLen = 256; - char log_buffer[kMaxLogLen]; - MicroVsnprintf(log_buffer, kMaxLogLen, format, args); - DebugLog(log_buffer); - DebugLog("\r\n"); -#endif - return 0; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_error_reporter.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_error_reporter.h deleted file mode 100644 index e2c073a4..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_error_reporter.h +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ - -#include - -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/micro/compatibility.h" - -namespace tflite { - -class MicroErrorReporter : public ErrorReporter { - public: - ~MicroErrorReporter() override {} - int Report(const char* format, va_list args) override; - - private: - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_interpreter.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_interpreter.cc deleted file mode 100644 index deb35c37..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_interpreter.cc +++ /dev/null @@ -1,453 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
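MicroErrorReporter::Report() above only formats into a stack buffer and forwards to DebugLog(), so a port must supply a working DebugLog() implementation for any message to appear. Typical application-side usage is just the macro; LogArenaSize is an illustrative wrapper:

    #include "tensorflow/lite/micro/micro_error_reporter.h"

    static tflite::MicroErrorReporter micro_error_reporter;
    static tflite::ErrorReporter* error_reporter = &micro_error_reporter;

    void LogArenaSize(int arena_bytes) {
      TF_LITE_REPORT_ERROR(error_reporter, "arena size = %d bytes", arena_bytes);
    }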
-==============================================================================*/ -#include "tensorflow/lite/micro/micro_interpreter.h" - -#include -#include -#include - -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/tensor_utils.h" -#include "tensorflow/lite/micro/memory_helpers.h" -#include "tensorflow/lite/micro/micro_allocator.h" -#include "tensorflow/lite/micro/micro_op_resolver.h" -#include "tensorflow/lite/micro/micro_profiler.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { -namespace { - -#ifndef TF_LITE_STRIP_ERROR_STRINGS -const char* OpNameFromRegistration(const TfLiteRegistration* registration) { - if (registration->builtin_code == BuiltinOperator_CUSTOM) { - return registration->custom_name; - } else { - return EnumNameBuiltinOperator(BuiltinOperator(registration->builtin_code)); - } -} -#endif // !defined(TF_LITE_STRIP_ERROR_STRINGS) - -} // namespace - -namespace internal { - -ContextHelper::ContextHelper(ErrorReporter* error_reporter, - MicroAllocator* allocator, const Model* model) - : allocator_(allocator), error_reporter_(error_reporter), model_(model) {} - -void* ContextHelper::AllocatePersistentBuffer(TfLiteContext* ctx, - size_t bytes) { - return reinterpret_cast(ctx->impl_) - ->allocator_->AllocatePersistentBuffer(bytes); -} - -TfLiteStatus ContextHelper::RequestScratchBufferInArena(TfLiteContext* ctx, - size_t bytes, - int* buffer_idx) { - ContextHelper* helper = reinterpret_cast(ctx->impl_); - return helper->allocator_->RequestScratchBufferInArena(bytes, buffer_idx); -} - -void* ContextHelper::GetScratchBuffer(TfLiteContext* ctx, int buffer_idx) { - ContextHelper* helper = reinterpret_cast(ctx->impl_); - ScratchBufferHandle* handle = helper->scratch_buffer_handles_ + buffer_idx; - return handle->data; -} - -void ContextHelper::ReportOpError(struct TfLiteContext* context, - const char* format, ...) 
{ -#ifndef TF_LITE_STRIP_ERROR_STRINGS - ContextHelper* helper = static_cast(context->impl_); - va_list args; - va_start(args, format); - TF_LITE_REPORT_ERROR(helper->error_reporter_, format, args); - va_end(args); -#endif -} - -TfLiteTensor* ContextHelper::GetTensor(const struct TfLiteContext* context, - int tensor_idx) { - ContextHelper* helper = static_cast(context->impl_); - return helper->allocator_->AllocateTempTfLiteTensor( - helper->model_, helper->eval_tensors_, tensor_idx); -} - -TfLiteEvalTensor* ContextHelper::GetEvalTensor( - const struct TfLiteContext* context, int tensor_idx) { - ContextHelper* helper = reinterpret_cast(context->impl_); - return &helper->eval_tensors_[tensor_idx]; -} - -void ContextHelper::SetTfLiteEvalTensors(TfLiteEvalTensor* eval_tensors) { - eval_tensors_ = eval_tensors; -} - -void ContextHelper::SetScratchBufferHandles( - ScratchBufferHandle* scratch_buffer_handles) { - scratch_buffer_handles_ = scratch_buffer_handles; -} - -} // namespace internal - -MicroInterpreter::MicroInterpreter(const Model* model, - const MicroOpResolver& op_resolver, - uint8_t* tensor_arena, - size_t tensor_arena_size, - ErrorReporter* error_reporter, - tflite::Profiler* profiler) - : model_(model), - op_resolver_(op_resolver), - error_reporter_(error_reporter), - allocator_(*MicroAllocator::Create(tensor_arena, tensor_arena_size, - error_reporter)), - tensors_allocated_(false), - initialization_status_(kTfLiteError), - eval_tensors_(nullptr), - context_helper_(error_reporter_, &allocator_, model), - input_tensor_(nullptr), - output_tensor_(nullptr) { - Init(profiler); -} - -MicroInterpreter::MicroInterpreter(const Model* model, - const MicroOpResolver& op_resolver, - MicroAllocator* allocator, - ErrorReporter* error_reporter, - tflite::Profiler* profiler) - : model_(model), - op_resolver_(op_resolver), - error_reporter_(error_reporter), - allocator_(*allocator), - tensors_allocated_(false), - initialization_status_(kTfLiteError), - eval_tensors_(nullptr), - context_helper_(error_reporter_, &allocator_, model), - input_tensor_(nullptr), - output_tensor_(nullptr) { - Init(profiler); -} - -MicroInterpreter::~MicroInterpreter() { - if (node_and_registrations_ != nullptr) { - for (size_t i = 0; i < subgraph_->operators()->size(); ++i) { - TfLiteNode* node = &(node_and_registrations_[i].node); - const TfLiteRegistration* registration = - node_and_registrations_[i].registration; - // registration is allocated outside the interpreter, so double check to - // make sure it's not nullptr; - if (registration != nullptr && registration->free != nullptr) { - registration->free(&context_, node->user_data); - } - } - } -} - -void MicroInterpreter::Init(tflite::Profiler* profiler) { - const flatbuffers::Vector>* subgraphs = - model_->subgraphs(); - if (subgraphs->size() != 1) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Only 1 subgraph is currently supported.\n"); - initialization_status_ = kTfLiteError; - return; - } - subgraph_ = (*subgraphs)[0]; - - context_.impl_ = static_cast(&context_helper_); - context_.ReportError = context_helper_.ReportOpError; - context_.GetTensor = context_helper_.GetTensor; - context_.GetEvalTensor = context_helper_.GetEvalTensor; - context_.recommended_num_threads = 1; - context_.profiler = profiler; - - initialization_status_ = kTfLiteOk; -} - -void MicroInterpreter::CorrectTensorEndianness(TfLiteEvalTensor* tensorCorr) { - int32_t tensorSize = 1; - for (int d = 0; d < tensorCorr->dims->size; ++d) - tensorSize *= reinterpret_cast(tensorCorr->dims->data)[d]; - - 
switch (tensorCorr->type) { - case TfLiteType::kTfLiteFloat32: - CorrectTensorDataEndianness(tensorCorr->data.f, tensorSize); - break; - case TfLiteType::kTfLiteFloat16: - CorrectTensorDataEndianness(tensorCorr->data.f16, tensorSize); - break; - case TfLiteType::kTfLiteInt64: - CorrectTensorDataEndianness(tensorCorr->data.i64, tensorSize); - break; - case TfLiteType::kTfLiteUInt64: - CorrectTensorDataEndianness(tensorCorr->data.u64, tensorSize); - break; - case TfLiteType::kTfLiteInt32: - CorrectTensorDataEndianness(tensorCorr->data.i32, tensorSize); - break; - case TfLiteType::kTfLiteInt16: - CorrectTensorDataEndianness(tensorCorr->data.i16, tensorSize); - break; - case TfLiteType::kTfLiteComplex64: - CorrectTensorDataEndianness(tensorCorr->data.c64, tensorSize); - break; - case TfLiteType::kTfLiteComplex128: - CorrectTensorDataEndianness(tensorCorr->data.c128, tensorSize); - break; - default: - // Do nothing for other data types. - break; - } -} - -template -void MicroInterpreter::CorrectTensorDataEndianness(T* data, int32_t size) { - for (int32_t i = 0; i < size; ++i) { - data[i] = flatbuffers::EndianScalar(data[i]); - } -} - -TfLiteStatus MicroInterpreter::AllocateTensors() { - if (allocator_.StartModelAllocation(model_, op_resolver_, - &node_and_registrations_, - &eval_tensors_) != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed starting model allocation.\n"); - initialization_status_ = kTfLiteError; - return kTfLiteError; - } - - // Update the pointer now that TfLiteEvalTensor allocation has completed on - // the context helper. - // TODO(b/16157777): This call would not be needed if ContextHelper rolled - // into the interpreter. - context_helper_.SetTfLiteEvalTensors(eval_tensors_); - context_.tensors_size = subgraph_->tensors()->size(); - - // If the system is big endian then convert weights from the flatbuffer from - // little to big endian on startup so that it does not need to be done during - // inference. - // NOTE: This requires that the flatbuffer is held in memory which can be - // modified by this process. - if (!FLATBUFFERS_LITTLEENDIAN) { - for (size_t t = 0; t < subgraph_->tensors()->size(); ++t) { - if (auto* buffer = - (*model_->buffers())[subgraph_->tensors()->Get(t)->buffer()]) { - // If we've found a buffer, does it have any data? - if (auto* array = buffer->data()) { - // If it has any data, is the data size larger than zero? - if (array->size()) { - // Update the endianness of the corresponding eval tensor since that - // struct holds the buffer used at inference time. - CorrectTensorEndianness(&eval_tensors_[t]); - } - } - } - } - } - - // Only allow AllocatePersistentBuffer in Init stage. 
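CorrectTensorDataEndianness() relies on flatbuffers::EndianScalar, which compiles to a no-op on little-endian targets and a byte swap on big-endian ones, so the weight conversion above costs nothing on the common Cortex-M configurations. A minimal illustration of the primitive:

    #include <cstdint>

    #include "flatbuffers/flatbuffers.h"  // from @flatbuffers

    // Unchanged on a little-endian CPU; byte-swapped on a big-endian CPU, i.e.
    // it converts between the flatbuffer's little-endian representation and
    // the host representation.
    uint32_t host_value = flatbuffers::EndianScalar(static_cast<uint32_t>(0x11223344));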
- context_.AllocatePersistentBuffer = context_helper_.AllocatePersistentBuffer; - context_.RequestScratchBufferInArena = nullptr; - context_.GetScratchBuffer = nullptr; - - for (size_t i = 0; i < subgraph_->operators()->size(); ++i) { - auto* node = &(node_and_registrations_[i].node); - auto* registration = node_and_registrations_[i].registration; - size_t init_data_size; - const char* init_data; - if (registration->builtin_code == BuiltinOperator_CUSTOM) { - init_data = reinterpret_cast(node->custom_initial_data); - init_data_size = node->custom_initial_data_size; - } else { - init_data = reinterpret_cast(node->builtin_data); - init_data_size = 0; - } - if (registration->init) { - node->user_data = - registration->init(&context_, init_data, init_data_size); - } - } - - // Both AllocatePersistentBuffer and RequestScratchBufferInArena is - // available in Prepare stage. - context_.RequestScratchBufferInArena = - context_helper_.RequestScratchBufferInArena; - for (size_t i = 0; i < subgraph_->operators()->size(); ++i) { - auto* node = &(node_and_registrations_[i].node); - auto* registration = node_and_registrations_[i].registration; - if (registration->prepare) { - TfLiteStatus prepare_status = registration->prepare(&context_, node); - if (prepare_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Node %s (number %df) failed to prepare with status %d", - OpNameFromRegistration(registration), i, prepare_status); - return kTfLiteError; - } - } - allocator_.FinishPrepareNodeAllocations(/*node_id=*/i); - } - - // Prepare is done, we're ready for Invoke. Memory allocation is no longer - // allowed. Kernels can only fetch scratch buffers via GetScratchBuffer. - context_.AllocatePersistentBuffer = nullptr; - context_.RequestScratchBufferInArena = nullptr; - context_.GetScratchBuffer = context_helper_.GetScratchBuffer; - - TF_LITE_ENSURE_OK(&context_, - allocator_.FinishModelAllocation(model_, eval_tensors_, - &scratch_buffer_handles_)); - // TODO(b/16157777): Remove this when ContextHelper is rolled into this class. - context_helper_.SetScratchBufferHandles(scratch_buffer_handles_); - - TF_LITE_ENSURE_STATUS(ResetVariableTensors()); - - tensors_allocated_ = true; - return kTfLiteOk; -} - -TfLiteStatus MicroInterpreter::Invoke() { - if (initialization_status_ != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Invoke() called after initialization failed\n"); - return kTfLiteError; - } - - // Ensure tensors are allocated before the interpreter is invoked to avoid - // difficult to debug segfaults. - if (!tensors_allocated_) { - TF_LITE_ENSURE_OK(&context_, AllocateTensors()); - } - - for (size_t i = 0; i < subgraph_->operators()->size(); ++i) { - auto* node = &(node_and_registrations_[i].node); - auto* registration = node_and_registrations_[i].registration; - - if (registration->invoke) { - TfLiteStatus invoke_status; -#ifndef NDEBUG // Omit profiler overhead from release builds. - // The case where profiler == nullptr is handled by - // ScopedOperatorProfile. - tflite::Profiler* profiler = - reinterpret_cast(context_.profiler); - ScopedOperatorProfile scoped_profiler( - profiler, OpNameFromRegistration(registration), i); -#endif - invoke_status = registration->invoke(&context_, node); - - // All TfLiteTensor structs used in the kernel are allocated from temp - // memory in the allocator. This creates a chain of allocations in the - // temp section. The call below resets the chain of allocations to - // prepare for the next call. 
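Nulling the context callbacks out between stages, as above, is what enforces the kernel lifecycle contract: persistent buffers may only be requested in init, scratch buffers may only be requested in prepare, and by invoke time only GetScratchBuffer remains. A sketch of a kernel that follows that contract; OpData and the 1 KiB scratch size are illustrative:

    #include "tensorflow/lite/c/common.h"

    struct OpData {  // illustrative per-op state
      int scratch_index;
    };

    void* Init(TfLiteContext* context, const char* buffer, size_t length) {
      // Init stage: only AllocatePersistentBuffer is available.
      return context->AllocatePersistentBuffer(context, sizeof(OpData));
    }

    TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
      // Prepare stage: scratch memory is requested but not yet addressable.
      auto* data = static_cast<OpData*>(node->user_data);
      TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena(
          context, /*bytes=*/1024, &data->scratch_index));
      return kTfLiteOk;
    }

    TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
      // Invoke stage: the planned buffer can finally be fetched on each call.
      auto* data = static_cast<OpData*>(node->user_data);
      uint8_t* scratch = static_cast<uint8_t*>(
          context->GetScratchBuffer(context, data->scratch_index));
      (void)scratch;
      return kTfLiteOk;
    }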
- allocator_.ResetTempAllocations(); - - if (invoke_status == kTfLiteError) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Node %s (number %d) failed to invoke with status %d", - OpNameFromRegistration(registration), i, invoke_status); - return kTfLiteError; - } else if (invoke_status != kTfLiteOk) { - return invoke_status; - } - } - } - return kTfLiteOk; -} - -TfLiteTensor* MicroInterpreter::input(size_t index) { - const size_t length = inputs_size(); - if (index >= length) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Input index %d out of range (length is %d)", index, - length); - return nullptr; - } - if (index != 0) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Input tensors not at index 0 are allocated from the " - "persistent memory arena. Repeat calls will cause excess " - "allocation!"); - return allocator_.AllocatePersistentTfLiteTensor(model_, eval_tensors_, - inputs().Get(index)); - } - if (input_tensor_ == nullptr) { - input_tensor_ = allocator_.AllocatePersistentTfLiteTensor( - model_, eval_tensors_, inputs().Get(index)); - } - return input_tensor_; -} - -TfLiteTensor* MicroInterpreter::output(size_t index) { - const size_t length = outputs_size(); - if (index >= length) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Output index %d out of range (length is %d)", index, - length); - return nullptr; - } - if (index != 0) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Output tensors not at index 0 are allocated from the " - "persistent memory arena. Repeat calls will cause excess " - "allocation!"); - return allocator_.AllocatePersistentTfLiteTensor(model_, eval_tensors_, - outputs().Get(index)); - } - if (output_tensor_ == nullptr) { - // TODO(b/162311891): Drop these allocations when the interpreter supports - // handling buffers from TfLiteEvalTensor. - output_tensor_ = allocator_.AllocatePersistentTfLiteTensor( - model_, eval_tensors_, outputs().Get(index)); - } - return output_tensor_; -} - -TfLiteTensor* MicroInterpreter::tensor(size_t index) { - const size_t length = tensors_size(); - if (index >= length) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Tensor index %d out of range (length is %d)", index, - length); - return nullptr; - } - return allocator_.AllocatePersistentTfLiteTensor(model_, eval_tensors_, - index); -} - -TfLiteStatus MicroInterpreter::ResetVariableTensors() { - for (size_t i = 0; i < subgraph_->tensors()->size(); ++i) { - auto* tensor = subgraph_->tensors()->Get(i); - if (tensor->is_variable()) { - size_t buffer_size; - TF_LITE_ENSURE_STATUS( - TfLiteEvalTensorByteLength(&eval_tensors_[i], &buffer_size)); - - int value = 0; - if (tensor->type() == tflite::TensorType_INT8) { - value = tensor->quantization()->zero_point()->Get(0); - } - memset(eval_tensors_[i].data.raw, value, buffer_size); - } - } - - return kTfLiteOk; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_interpreter.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_interpreter.h deleted file mode 100644 index 31720c8e..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_interpreter.h +++ /dev/null @@ -1,211 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
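ResetTempAllocations() after every kernel invocation is also the reason TfLiteTensor pointers obtained inside a kernel must not be cached across calls. Separately, the public ResetVariableTensors() further down matters for stateful models, whose variable tensors keep their values across Invoke(); a small fragment, with interpreter and error_reporter as placeholders:

    // Between two independent input sequences of a stateful model:
    if (interpreter.ResetVariableTensors() != kTfLiteOk) {
      TF_LITE_REPORT_ERROR(error_reporter, "Failed to reset variable tensors.");
    }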
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_H_ - -#include -#include - -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/profiler.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/micro/micro_allocator.h" -#include "tensorflow/lite/micro/micro_op_resolver.h" -#include "tensorflow/lite/portable_type_to_tflitetype.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { - -namespace internal { - -// A helper class to encapsulate the implementation of APIs in Context. -// context->impl_ points to an instance of this class. -// Check tensorflow/lite/c/common.h for detailed descriptions. -// TODO(b/16157777): Consider rolling this class into MicroInterpreter. -class ContextHelper { - public: - explicit ContextHelper(ErrorReporter* error_reporter, - MicroAllocator* allocator, const Model* model); - - // Functions that will be assigned to function pointers on TfLiteContext: - static void* AllocatePersistentBuffer(TfLiteContext* ctx, size_t bytes); - static TfLiteStatus RequestScratchBufferInArena(TfLiteContext* ctx, - size_t bytes, - int* buffer_idx); - static void* GetScratchBuffer(TfLiteContext* ctx, int buffer_idx); - static void ReportOpError(struct TfLiteContext* context, const char* format, - ...); - static TfLiteTensor* GetTensor(const struct TfLiteContext* context, - int tensor_idx); - static TfLiteEvalTensor* GetEvalTensor(const struct TfLiteContext* context, - int tensor_idx); - - // Sets the pointer to a list of TfLiteEvalTensor instances. - void SetTfLiteEvalTensors(TfLiteEvalTensor* eval_tensors); - - // Sets the pointer to a list of ScratchBufferHandle instances. - void SetScratchBufferHandles(ScratchBufferHandle* scratch_buffer_handles); - - private: - MicroAllocator* allocator_ = nullptr; - ErrorReporter* error_reporter_ = nullptr; - const Model* model_ = nullptr; - TfLiteEvalTensor* eval_tensors_ = nullptr; - ScratchBufferHandle* scratch_buffer_handles_ = nullptr; -}; - -} // namespace internal - -class MicroInterpreter { - public: - // The lifetime of the model, op resolver, tensor arena, error reporter and - // profiler must be at least as long as that of the interpreter object, since - // the interpreter may need to access them at any time. This means that you - // should usually create them with the same scope as each other, for example - // having them all allocated on the stack as local variables through a - // top-level function. The interpreter doesn't do any deallocation of any of - // the pointed-to objects, ownership remains with the caller. - MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, - uint8_t* tensor_arena, size_t tensor_arena_size, - ErrorReporter* error_reporter, - tflite::Profiler* profiler = nullptr); - - // Create an interpreter instance using an existing MicroAllocator instance. 
- // This constructor should be used when creating an allocator that needs to - // have allocation handled in more than one interpreter or for recording - // allocations inside the interpreter. The lifetime of the allocator must be - // as long as that of the interpreter object. - MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, - MicroAllocator* allocator, ErrorReporter* error_reporter, - tflite::Profiler* profiler = nullptr); - - ~MicroInterpreter(); - - // Runs through the model and allocates all necessary input, output and - // intermediate tensors. - TfLiteStatus AllocateTensors(); - - // In order to support partial graph runs for strided models, this can return - // values other than kTfLiteOk and kTfLiteError. - // TODO(b/149795762): Add this to the TfLiteStatus enum. - TfLiteStatus Invoke(); - - size_t tensors_size() const { return context_.tensors_size; } - TfLiteTensor* tensor(size_t tensor_index); - template - T* typed_tensor(int tensor_index) { - if (TfLiteTensor* tensor_ptr = tensor(tensor_index)) { - if (tensor_ptr->type == typeToTfLiteType()) { - return GetTensorData(tensor_ptr); - } - } - return nullptr; - } - - TfLiteTensor* input(size_t index); - size_t inputs_size() const { return subgraph_->inputs()->Length(); } - const flatbuffers::Vector& inputs() const { - return *subgraph_->inputs(); - } - TfLiteTensor* input_tensor(size_t index) { return input(index); } - template - T* typed_input_tensor(int tensor_index) { - if (TfLiteTensor* tensor_ptr = input_tensor(tensor_index)) { - if (tensor_ptr->type == typeToTfLiteType()) { - return GetTensorData(tensor_ptr); - } - } - return nullptr; - } - - TfLiteTensor* output(size_t index); - size_t outputs_size() const { return subgraph_->outputs()->Length(); } - const flatbuffers::Vector& outputs() const { - return *subgraph_->outputs(); - } - TfLiteTensor* output_tensor(size_t index) { return output(index); } - template - T* typed_output_tensor(int tensor_index) { - if (TfLiteTensor* tensor_ptr = output_tensor(tensor_index)) { - if (tensor_ptr->type == typeToTfLiteType()) { - return GetTensorData(tensor_ptr); - } - } - return nullptr; - } - - // Reset all variable tensors to the default value. - TfLiteStatus ResetVariableTensors(); - - TfLiteStatus initialization_status() const { return initialization_status_; } - - size_t operators_size() const { return subgraph_->operators()->size(); } - - // For debugging only. - const NodeAndRegistration node_and_registration(int node_index) const { - return node_and_registrations_[node_index]; - } - - // For debugging only. - // Returns the actual used arena in bytes. This method gives the optimal arena - // size. It's only available after `AllocateTensors` has been called. - // Note that normally `tensor_arena` requires 16 bytes alignment to fully - // utilize the space. If it's not the case, the optimial arena size would be - // arena_used_bytes() + 16. - size_t arena_used_bytes() const { return allocator_.used_bytes(); } - - protected: - const MicroAllocator& allocator() const { return allocator_; } - const TfLiteContext& context() const { return context_; } - - private: - // TODO(b/158263161): Consider switching to Create() function to enable better - // error reporting during initialization. 
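Pulled together, the public interface declared in this header is driven in a fixed order: construct, AllocateTensors(), write input(0), Invoke(), read output(0). A condensed sketch assuming a single-input/single-output float model; g_model, the 8 KiB arena and the FULLY_CONNECTED-only op set are placeholders:

    #include "tensorflow/lite/micro/micro_error_reporter.h"
    #include "tensorflow/lite/micro/micro_interpreter.h"
    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
    #include "tensorflow/lite/schema/schema_generated.h"

    constexpr size_t kArenaSize = 8 * 1024;  // model dependent
    alignas(16) static uint8_t tensor_arena[kArenaSize];

    float RunOnce(const unsigned char* g_model, float x) {
      static tflite::MicroErrorReporter micro_error_reporter;
      static tflite::MicroMutableOpResolver<4> resolver;
      static const bool ops_added =  // one-time registration on first call
          (resolver.AddFullyConnected() == kTfLiteOk);
      (void)ops_added;

      static tflite::MicroInterpreter interpreter(
          tflite::GetModel(g_model), resolver, tensor_arena, kArenaSize,
          &micro_error_reporter);

      static const bool tensors_ok =  // plan memory once, on first call
          (interpreter.AllocateTensors() == kTfLiteOk);
      if (!tensors_ok) return 0.f;

      interpreter.input(0)->data.f[0] = x;      // assumes one float input
      if (interpreter.Invoke() != kTfLiteOk) return 0.f;
      return interpreter.output(0)->data.f[0];  // assumes one float output
    }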
- void Init(tflite::Profiler* profiler); - - void CorrectTensorEndianness(TfLiteEvalTensor* tensorCorr); - - template - void CorrectTensorDataEndianness(T* data, int32_t size); - - NodeAndRegistration* node_and_registrations_ = nullptr; - - const Model* model_; - const MicroOpResolver& op_resolver_; - ErrorReporter* error_reporter_; - TfLiteContext context_ = {}; - MicroAllocator& allocator_; - bool tensors_allocated_; - - TfLiteStatus initialization_status_; - - const SubGraph* subgraph_ = nullptr; - TfLiteEvalTensor* eval_tensors_ = nullptr; - ScratchBufferHandle* scratch_buffer_handles_ = nullptr; - - // TODO(b/16157777): Drop this reference: - internal::ContextHelper context_helper_; - - // TODO(b/162311891): Clean these pointers up when this class supports buffers - // from TfLiteEvalTensor. - TfLiteTensor* input_tensor_; - TfLiteTensor* output_tensor_; -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_mutable_op_resolver.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_mutable_op_resolver.h deleted file mode 100644 index 790b93b4..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_mutable_op_resolver.h +++ /dev/null @@ -1,485 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_ - -#include -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/flatbuffer_conversions.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/op_macros.h" -#include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/kernels/ethosu.h" -#include "tensorflow/lite/micro/kernels/fully_connected.h" -#include "tensorflow/lite/micro/kernels/micro_ops.h" -#include "tensorflow/lite/micro/micro_op_resolver.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { -TfLiteRegistration* Register_DETECTION_POSTPROCESS(); - -template -class MicroMutableOpResolver : public MicroOpResolver { - public: - TF_LITE_REMOVE_VIRTUAL_DELETE - - explicit MicroMutableOpResolver(ErrorReporter* error_reporter = nullptr) - : error_reporter_(error_reporter) {} - - const TfLiteRegistration* FindOp(tflite::BuiltinOperator op) const override { - if (op == BuiltinOperator_CUSTOM) return nullptr; - - for (unsigned int i = 0; i < registrations_len_; ++i) { - const TfLiteRegistration& registration = registrations_[i]; - if (registration.builtin_code == op) { - return ®istration; - } - } - return nullptr; - } - - const TfLiteRegistration* FindOp(const char* op) const override { - for (unsigned int i = 0; i < registrations_len_; ++i) { - const TfLiteRegistration& registration = registrations_[i]; - if ((registration.builtin_code == BuiltinOperator_CUSTOM) && - (strcmp(registration.custom_name, op) == 0)) { - return ®istration; - } - } - return nullptr; - } - - MicroOpResolver::BuiltinParseFunction GetOpDataParser( - BuiltinOperator op) const override { - TFLITE_DCHECK(num_buitin_ops_ <= tOpCount); - for (unsigned int i = 0; i < num_buitin_ops_; ++i) { - if (builtin_codes_[i] == op) return builtin_parsers_[i]; - } - return nullptr; - } - - // Registers a Custom Operator with the MicroOpResolver. - // - // Only the first call for a given name will be successful. i.e. if this - // function is called again for a previously added Custom Operator, the - // MicroOpResolver will be unchanged and this function will return - // kTfLiteError. - TfLiteStatus AddCustom(const char* name, TfLiteRegistration* registration) { - if (registrations_len_ >= tOpCount) { - if (error_reporter_) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Couldn't register custom op '%s', resolver size is too small (%d)", - name, tOpCount); - } - return kTfLiteError; - } - - if (FindOp(name) != nullptr) { - if (error_reporter_ != nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Calling AddCustom for the same op more than once " - "is not supported (Op: %s).", - name); - } - return kTfLiteError; - } - - TfLiteRegistration* new_registration = ®istrations_[registrations_len_]; - registrations_len_ += 1; - - *new_registration = *registration; - new_registration->builtin_code = BuiltinOperator_CUSTOM; - new_registration->custom_name = name; - return kTfLiteOk; - } - - // The Add* functions below add the various Builtin operators to the - // MicroMutableOpResolver object. 
- - TfLiteStatus AddAbs() { - return AddBuiltin(BuiltinOperator_ABS, tflite::ops::micro::Register_ABS(), - ParseAbs); - } - - TfLiteStatus AddAdd() { - return AddBuiltin(BuiltinOperator_ADD, tflite::ops::micro::Register_ADD(), - ParseAdd); - } - - TfLiteStatus AddArgMax() { - return AddBuiltin(BuiltinOperator_ARG_MAX, - tflite::ops::micro::Register_ARG_MAX(), ParseArgMax); - } - - TfLiteStatus AddArgMin() { - return AddBuiltin(BuiltinOperator_ARG_MIN, - tflite::ops::micro::Register_ARG_MIN(), ParseArgMin); - } - - TfLiteStatus AddAveragePool2D() { - return AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, - tflite::ops::micro::Register_AVERAGE_POOL_2D(), - ParsePool); - } - - TfLiteStatus AddCeil() { - return AddBuiltin(BuiltinOperator_CEIL, tflite::ops::micro::Register_CEIL(), - ParseCeil); - } - - TfLiteStatus AddCircularBuffer() { - return AddCustom("CIRCULAR_BUFFER", - tflite::ops::micro::Register_CIRCULAR_BUFFER()); - } - - TfLiteStatus AddConcatenation() { - return AddBuiltin(BuiltinOperator_CONCATENATION, - tflite::ops::micro::Register_CONCATENATION(), - ParseConcatenation); - } - - TfLiteStatus AddConv2D() { - return AddBuiltin(BuiltinOperator_CONV_2D, Register_CONV_2D(), ParseConv2D); - } - - TfLiteStatus AddCos() { - return AddBuiltin(BuiltinOperator_COS, tflite::ops::micro::Register_COS(), - ParseCos); - } - - TfLiteStatus AddDepthwiseConv2D() { - return AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, - Register_DEPTHWISE_CONV_2D(), ParseDepthwiseConv2D); - } - - TfLiteStatus AddDequantize() { - return AddBuiltin(BuiltinOperator_DEQUANTIZE, - tflite::ops::micro::Register_DEQUANTIZE(), - ParseDequantize); - } - - TfLiteStatus AddDetectionPostprocess() { - return AddCustom("TFLite_Detection_PostProcess", - tflite::Register_DETECTION_POSTPROCESS()); - } - - TfLiteStatus AddEqual() { - return AddBuiltin(BuiltinOperator_EQUAL, - tflite::ops::micro::Register_EQUAL(), ParseEqual); - } - - TfLiteStatus AddEthosU() { - TfLiteRegistration* registration = tflite::Register_ETHOSU(); - if (registration) { - return AddCustom(tflite::GetString_ETHOSU(), registration); - } - return kTfLiteOk; - } - - TfLiteStatus AddFloor() { - return AddBuiltin(BuiltinOperator_FLOOR, - tflite::ops::micro::Register_FLOOR(), ParseFloor); - } - - TfLiteStatus AddFullyConnected( - const TfLiteRegistration& registration = Register_FULLY_CONNECTED()) { - return AddBuiltin(BuiltinOperator_FULLY_CONNECTED, registration, - ParseFullyConnected); - } - - TfLiteStatus AddGreater() { - return AddBuiltin(BuiltinOperator_GREATER, - tflite::ops::micro::Register_GREATER(), ParseGreater); - } - - TfLiteStatus AddGreaterEqual() { - return AddBuiltin(BuiltinOperator_GREATER_EQUAL, - tflite::ops::micro::Register_GREATER_EQUAL(), - ParseGreaterEqual); - } - - TfLiteStatus AddHardSwish() { - return AddBuiltin(BuiltinOperator_HARD_SWISH, - tflite::ops::micro::Register_HARD_SWISH(), - ParseHardSwish); - } - - TfLiteStatus AddL2Normalization() { - return AddBuiltin(BuiltinOperator_L2_NORMALIZATION, - tflite::ops::micro::Register_L2_NORMALIZATION(), - ParseL2Normalization); - } - - TfLiteStatus AddLess() { - return AddBuiltin(BuiltinOperator_LESS, tflite::ops::micro::Register_LESS(), - ParseLess); - } - - TfLiteStatus AddLessEqual() { - return AddBuiltin(BuiltinOperator_LESS_EQUAL, - tflite::ops::micro::Register_LESS_EQUAL(), - ParseLessEqual); - } - - TfLiteStatus AddLog() { - return AddBuiltin(BuiltinOperator_LOG, tflite::ops::micro::Register_LOG(), - ParseLog); - } - - TfLiteStatus AddLogicalAnd() { - return 
AddBuiltin(BuiltinOperator_LOGICAL_AND, - tflite::ops::micro::Register_LOGICAL_AND(), - ParseLogicalAnd); - } - - TfLiteStatus AddLogicalNot() { - return AddBuiltin(BuiltinOperator_LOGICAL_NOT, - tflite::ops::micro::Register_LOGICAL_NOT(), - ParseLogicalNot); - } - - TfLiteStatus AddLogicalOr() { - return AddBuiltin(BuiltinOperator_LOGICAL_OR, - tflite::ops::micro::Register_LOGICAL_OR(), - ParseLogicalOr); - } - - TfLiteStatus AddLogistic() { - return AddBuiltin(BuiltinOperator_LOGISTIC, - tflite::ops::micro::Register_LOGISTIC(), ParseLogistic); - } - - TfLiteStatus AddMaximum() { - return AddBuiltin(BuiltinOperator_MAXIMUM, - tflite::ops::micro::Register_MAXIMUM(), ParseMaximum); - } - - TfLiteStatus AddMaxPool2D() { - return AddBuiltin(BuiltinOperator_MAX_POOL_2D, - tflite::ops::micro::Register_MAX_POOL_2D(), ParsePool); - } - - TfLiteStatus AddMean() { - return AddBuiltin(BuiltinOperator_MEAN, tflite::ops::micro::Register_MEAN(), - ParseReducer); - } - - TfLiteStatus AddMinimum() { - return AddBuiltin(BuiltinOperator_MINIMUM, - tflite::ops::micro::Register_MINIMUM(), ParseMinimum); - } - - TfLiteStatus AddMul() { - return AddBuiltin(BuiltinOperator_MUL, tflite::ops::micro::Register_MUL(), - ParseMul); - } - - TfLiteStatus AddNeg() { - return AddBuiltin(BuiltinOperator_NEG, tflite::ops::micro::Register_NEG(), - ParseNeg); - } - - TfLiteStatus AddNotEqual() { - return AddBuiltin(BuiltinOperator_NOT_EQUAL, - tflite::ops::micro::Register_NOT_EQUAL(), ParseNotEqual); - } - - TfLiteStatus AddPack() { - return AddBuiltin(BuiltinOperator_PACK, tflite::ops::micro::Register_PACK(), - ParsePack); - } - - TfLiteStatus AddPad() { - return AddBuiltin(BuiltinOperator_PAD, tflite::ops::micro::Register_PAD(), - ParsePad); - } - - TfLiteStatus AddPadV2() { - return AddBuiltin(BuiltinOperator_PADV2, - tflite::ops::micro::Register_PADV2(), ParsePadV2); - } - - TfLiteStatus AddPrelu() { - return AddBuiltin(BuiltinOperator_PRELU, - tflite::ops::micro::Register_PRELU(), ParsePrelu); - } - - TfLiteStatus AddQuantize() { - return AddBuiltin(BuiltinOperator_QUANTIZE, Register_QUANTIZE(), - ParseQuantize); - } - - TfLiteStatus AddReduceMax() { - return AddBuiltin(BuiltinOperator_REDUCE_MAX, - tflite::ops::micro::Register_REDUCE_MAX(), ParseReducer); - } - - TfLiteStatus AddRelu() { - return AddBuiltin(BuiltinOperator_RELU, tflite::ops::micro::Register_RELU(), - ParseRelu); - } - - TfLiteStatus AddRelu6() { - return AddBuiltin(BuiltinOperator_RELU6, - tflite::ops::micro::Register_RELU6(), ParseRelu6); - } - - TfLiteStatus AddReshape() { - return AddBuiltin(BuiltinOperator_RESHAPE, - tflite::ops::micro::Register_RESHAPE(), ParseReshape); - } - - TfLiteStatus AddResizeNearestNeighbor() { - return AddBuiltin(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, - tflite::ops::micro::Register_RESIZE_NEAREST_NEIGHBOR(), - ParseResizeNearestNeighbor); - } - - TfLiteStatus AddRound() { - return AddBuiltin(BuiltinOperator_ROUND, - tflite::ops::micro::Register_ROUND(), ParseRound); - } - - TfLiteStatus AddRsqrt() { - return AddBuiltin(BuiltinOperator_RSQRT, - tflite::ops::micro::Register_RSQRT(), ParseRsqrt); - } - - TfLiteStatus AddShape() { - return AddBuiltin(BuiltinOperator_SHAPE, Register_SHAPE(), ParseShape); - } - - TfLiteStatus AddSin() { - return AddBuiltin(BuiltinOperator_SIN, tflite::ops::micro::Register_SIN(), - ParseSin); - } - - TfLiteStatus AddSoftmax() { - return AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX(), - ParseSoftmax); - } - - TfLiteStatus AddSplit() { - return AddBuiltin(BuiltinOperator_SPLIT, - 
tflite::ops::micro::Register_SPLIT(), ParseSplit); - } - - TfLiteStatus AddSplitV() { - return AddBuiltin(BuiltinOperator_SPLIT_V, - tflite::ops::micro::Register_SPLIT_V(), ParseSplitV); - } - - TfLiteStatus AddSqrt() { - return AddBuiltin(BuiltinOperator_SQRT, tflite::ops::micro::Register_SQRT(), - ParseSqrt); - } - - TfLiteStatus AddSquare() { - return AddBuiltin(BuiltinOperator_SQUARE, - tflite::ops::micro::Register_SQUARE(), ParseSquare); - } - - TfLiteStatus AddStridedSlice() { - return AddBuiltin(BuiltinOperator_STRIDED_SLICE, - tflite::ops::micro::Register_STRIDED_SLICE(), - ParseStridedSlice); - } - - TfLiteStatus AddSub() { - return AddBuiltin(BuiltinOperator_SUB, tflite::ops::micro::Register_SUB(), - ParseSub); - } - - TfLiteStatus AddSvdf() { - return AddBuiltin(BuiltinOperator_SVDF, Register_SVDF(), ParseSvdf); - } - - TfLiteStatus AddTanh() { - return AddBuiltin(BuiltinOperator_TANH, tflite::ops::micro::Register_TANH(), - ParseTanh); - } - - TfLiteStatus AddUnpack() { - return AddBuiltin(BuiltinOperator_UNPACK, - tflite::ops::micro::Register_UNPACK(), ParseUnpack); - } - - unsigned int GetRegistrationLength() { return registrations_len_; } - - private: - TfLiteStatus AddBuiltin(tflite::BuiltinOperator op, - const TfLiteRegistration& registration, - MicroOpResolver::BuiltinParseFunction parser) { - if (op == BuiltinOperator_CUSTOM) { - if (error_reporter_ != nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Invalid parameter BuiltinOperator_CUSTOM to the " - "AddBuiltin function."); - } - return kTfLiteError; - } - - if (FindOp(op) != nullptr) { - if (error_reporter_ != nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Calling AddBuiltin with the same op more than " - "once is not supported (Op: #%d).", - op); - } - return kTfLiteError; - } - - if (registrations_len_ >= tOpCount) { - if (error_reporter_) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Couldn't register builtin op #%d, resolver size " - "is too small (%d).", - op, tOpCount); - } - return kTfLiteError; - } - - registrations_[registrations_len_] = registration; - // Strictly speaking, the builtin_code is not necessary for TFLM but filling - // it in regardless. - registrations_[registrations_len_].builtin_code = op; - registrations_len_++; - - builtin_codes_[num_buitin_ops_] = op; - builtin_parsers_[num_buitin_ops_] = parser; - num_buitin_ops_++; - - return kTfLiteOk; - } - - TfLiteRegistration registrations_[tOpCount]; - unsigned int registrations_len_ = 0; - - // Arrays (and counter) to store the builtin codes and their corresponding - // parse functions as these are registered with the Op Resolver. - BuiltinOperator builtin_codes_[tOpCount]; - MicroOpResolver::BuiltinParseFunction builtin_parsers_[tOpCount]; - unsigned int num_buitin_ops_ = 0; - - ErrorReporter* error_reporter_; -}; - -}; // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_MICRO_MUTABLE_OP_RESOLVER_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_op_resolver.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_op_resolver.h deleted file mode 100644 index 757b6b89..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_op_resolver.h +++ /dev/null @@ -1,73 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MICRO_OP_RESOLVER_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_OP_RESOLVER_H_ - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/flatbuffer_conversions.h" -#include "tensorflow/lite/core/api/op_resolver.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { - -// This is an interface for the OpResolver for TFLiteMicro. The differences from -// the TFLite OpResolver base class are to: -// * explicitly remove support for Op versions -// * allow for finer grained registration of the Builtin Ops to reduce code -// size for TFLiteMicro. -// -// We need an interface class instead of directly using MicroMutableOpResolver -// because MicroMutableOpResolver is a class template with the number of -// registered Ops as the template parameter. -class MicroOpResolver : public OpResolver { - public: - typedef TfLiteStatus (*BuiltinParseFunction)(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - - // Returns the Op registration struct corresponding to the enum code from the - // flatbuffer schema. Returns nullptr if the op is not found or if op == - // BuiltinOperator_CUSTOM. - virtual const TfLiteRegistration* FindOp(BuiltinOperator op) const = 0; - - // Returns the Op registration struct corresponding to the custom operator by - // name. - virtual const TfLiteRegistration* FindOp(const char* op) const = 0; - - // This implementation exists for compatibility with the OpResolver base class - // and disregards the version parameter. - const TfLiteRegistration* FindOp(BuiltinOperator op, - int version) const final { - return FindOp(op); - } - - // This implementation exists for compatibility with the OpResolver base class - // and disregards the version parameter. - const TfLiteRegistration* FindOp(const char* op, int version) const final { - return FindOp(op); - } - - // Returns the operator specific parsing function for the OpData for a - // BuiltinOperator (if registered), else nullptr. - virtual BuiltinParseFunction GetOpDataParser(BuiltinOperator op) const = 0; - - ~MicroOpResolver() override {} -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_MICRO_OP_RESOLVER_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_profiler.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_profiler.cc deleted file mode 100644 index 83fb9f64..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_profiler.cc +++ /dev/null @@ -1,42 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
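A hedged sketch of how a caller can combine the FindOp and GetOpDataParser contracts of the MicroOpResolver interface deleted above; the interpreter-side wiring (error reporter, builtin-data allocator, flatbuffer Operator) is simplified to the bare lookup-and-parse step.

```cpp
// Sketch only: exercises the MicroOpResolver interface from the deleted header;
// the surrounding interpreter plumbing is not part of this patch.
#include "tensorflow/lite/micro/micro_op_resolver.h"

TfLiteStatus ResolveAndParse(const tflite::MicroOpResolver& resolver,
                             tflite::BuiltinOperator op_code,
                             const tflite::Operator* op,
                             tflite::ErrorReporter* reporter,
                             tflite::BuiltinDataAllocator* allocator,
                             void** builtin_data,
                             const TfLiteRegistration** registration) {
  // FindOp returns nullptr for BuiltinOperator_CUSTOM or unregistered ops.
  *registration = resolver.FindOp(op_code);
  if (*registration == nullptr) return kTfLiteError;
  // The parse function was recorded when the op was added to the resolver.
  tflite::MicroOpResolver::BuiltinParseFunction parser =
      resolver.GetOpDataParser(op_code);
  if (parser == nullptr) return kTfLiteError;
  return parser(op, reporter, allocator, builtin_data);
}
```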
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/micro_profiler.h" - -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/micro/micro_time.h" - -namespace tflite { - -MicroProfiler::MicroProfiler(tflite::ErrorReporter* reporter) - : reporter_(reporter) {} - -uint32_t MicroProfiler::BeginEvent(const char* tag, EventType event_type, - int64_t event_metadata1, - int64_t event_metadata2) { - start_time_ = GetCurrentTimeTicks(); - TFLITE_DCHECK(tag != nullptr); - event_tag_ = tag; - return 0; -} - -void MicroProfiler::EndEvent(uint32_t event_handle) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - int32_t end_time = GetCurrentTimeTicks(); - TF_LITE_REPORT_ERROR(reporter_, "%s took %d cycles\n", event_tag_, - end_time - start_time_); -#endif -} -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_profiler.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_profiler.h deleted file mode 100644 index a3144b3a..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_profiler.h +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_ - -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/core/api/profiler.h" -#include "tensorflow/lite/micro/compatibility.h" - -namespace tflite { - -// MicroProfiler creates a common way to gain fine-grained insight into runtime -// performance. Bottleck operators can be identified along with slow code -// sections. This can be used in conjunction with running the relevant micro -// benchmark to evaluate end-to-end performance. -// -// Usage example: -// MicroProfiler profiler(error_reporter); -// { -// ScopedProfile scoped_profile(profiler, tag); -// work_to_profile(); -// } -// -// This will call the following methods in order: -// int event_handle = profiler->BeginEvent(op_name, EventType::DEFAULT, 0) -// work_to_profile(); -// profiler->EndEvent(event_handle) -class MicroProfiler : public tflite::Profiler { - public: - explicit MicroProfiler(tflite::ErrorReporter* reporter); - ~MicroProfiler() override = default; - - // AddEvent is unused for Tf Micro. 
- void AddEvent(const char* tag, EventType event_type, uint64_t start, - uint64_t end, int64_t event_metadata1, - int64_t event_metadata2) override{}; - - // BeginEvent followed by code followed by EndEvent will profile the code - // enclosed. Multiple concurrent events are unsupported, so the return value - // is always 0. Event_metadata1 and event_metadata2 are unused. The tag - // pointer must be valid until EndEvent is called. - uint32_t BeginEvent(const char* tag, EventType event_type, - int64_t event_metadata1, - int64_t event_metadata2) override; - - // Event_handle is ignored since TF Micro does not support concurrent events. - void EndEvent(uint32_t event_handle) override; - - private: - tflite::ErrorReporter* reporter_; - int32_t start_time_; - const char* event_tag_; - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_string.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_string.cc deleted file mode 100644 index ad769f69..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_string.cc +++ /dev/null @@ -1,309 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Implements debug logging for numbers by converting them into strings and then -// calling the main DebugLog(char*) function. These are separated into a -// different file so that platforms can just implement the string output version -// of DebugLog() and then get the numerical variations without requiring any -// more code. - -#include "tensorflow/lite/micro/micro_string.h" - -#include -#include -#include - -namespace { - -// Int formats can need up to 10 bytes for the value plus a single byte for the -// sign. -constexpr int kMaxIntCharsNeeded = 10 + 1; -// Hex formats can need up to 8 bytes for the value plus two bytes for the "0x". -constexpr int kMaxHexCharsNeeded = 8 + 2; - -// Float formats can need up to 7 bytes for the fraction plus 3 bytes for "x2^" -// plus 3 bytes for the exponent and a single sign bit. -constexpr float kMaxFloatCharsNeeded = 7 + 3 + 3 + 1; - -// All input buffers to the number conversion functions must be this long. -const int kFastToBufferSize = 48; - -// Reverses a zero-terminated string in-place. -char* ReverseStringInPlace(char* start, char* end) { - char* p1 = start; - char* p2 = end - 1; - while (p1 < p2) { - char tmp = *p1; - *p1++ = *p2; - *p2-- = tmp; - } - return start; -} - -// Appends a string to a string, in-place. You need to pass in the maximum -// string length as the second argument. 
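The MicroProfiler deleted just above supports a single open event at a time; the sketch below mirrors the BeginEvent/EndEvent pattern described in its header comment. The error reporter, the "Conv2D" tag and the use of Profiler::EventType::DEFAULT are illustrative assumptions rather than anything fixed by this patch.

```cpp
// Sketch only: follows the usage pattern in the deleted micro_profiler.h comment.
#include <cstdint>

#include "tensorflow/lite/micro/micro_profiler.h"

void ProfileOnce(tflite::ErrorReporter* error_reporter) {
  tflite::MicroProfiler profiler(error_reporter);
  // Only one event may be open at a time; the returned handle is always 0.
  uint32_t handle = profiler.BeginEvent("Conv2D", tflite::Profiler::EventType::DEFAULT,
                                        /*event_metadata1=*/0,
                                        /*event_metadata2=*/0);
  // ... run the code being measured ...
  // Logs "<tag> took N cycles" unless TF_LITE_STRIP_ERROR_STRINGS is defined.
  profiler.EndEvent(handle);
}
```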
-char* StrCatStr(char* main, int main_max_length, const char* to_append) { - char* current = main; - while (*current != 0) { - ++current; - } - char* current_end = main + (main_max_length - 1); - while ((*to_append != 0) && (current < current_end)) { - *current = *to_append; - ++current; - ++to_append; - } - *current = 0; - return current; -} - -// Populates the provided buffer with an ASCII representation of the number. -char* FastUInt32ToBufferLeft(uint32_t i, char* buffer, int base) { - char* start = buffer; - do { - int32_t digit = i % base; - char character; - if (digit < 10) { - character = '0' + digit; - } else { - character = 'a' + (digit - 10); - } - *buffer++ = character; - i /= base; - } while (i > 0); - *buffer = 0; - ReverseStringInPlace(start, buffer); - return buffer; -} - -// Populates the provided buffer with an ASCII representation of the number. -char* FastInt32ToBufferLeft(int32_t i, char* buffer) { - uint32_t u = i; - if (i < 0) { - *buffer++ = '-'; - u = -u; - } - return FastUInt32ToBufferLeft(u, buffer, 10); -} - -// Converts a number to a string and appends it to another. -char* StrCatInt32(char* main, int main_max_length, int32_t number) { - char number_string[kFastToBufferSize]; - FastInt32ToBufferLeft(number, number_string); - return StrCatStr(main, main_max_length, number_string); -} - -// Converts a number to a string and appends it to another. -char* StrCatUInt32(char* main, int main_max_length, uint32_t number, int base) { - char number_string[kFastToBufferSize]; - FastUInt32ToBufferLeft(number, number_string, base); - return StrCatStr(main, main_max_length, number_string); -} - -// Populates the provided buffer with ASCII representation of the float number. -// Avoids the use of any floating point instructions (since these aren't -// supported on many microcontrollers) and as a consequence prints values with -// power-of-two exponents. -char* FastFloatToBufferLeft(float f, char* buffer) { - char* current = buffer; - char* current_end = buffer + (kFastToBufferSize - 1); - // Access the bit fields of the floating point value to avoid requiring any - // float instructions. These constants are derived from IEEE 754. - const uint32_t sign_mask = 0x80000000; - const uint32_t exponent_mask = 0x7f800000; - const int32_t exponent_shift = 23; - const int32_t exponent_bias = 127; - const uint32_t fraction_mask = 0x007fffff; - uint32_t u; - memcpy(&u, &f, sizeof(int32_t)); - const int32_t exponent = - ((u & exponent_mask) >> exponent_shift) - exponent_bias; - const uint32_t fraction = (u & fraction_mask); - // Expect ~0x2B1B9D3 for fraction. - if (u & sign_mask) { - *current = '-'; - current += 1; - } - *current = 0; - // These are special cases for infinities and not-a-numbers. - if (exponent == 128) { - if (fraction == 0) { - current = StrCatStr(current, (current_end - current), "Inf"); - return current; - } else { - current = StrCatStr(current, (current_end - current), "NaN"); - return current; - } - } - // 0x007fffff (8388607) represents 0.99... for the fraction, so to print the - // correct decimal digits we need to scale our value before passing it to the - // conversion function. This scale should be 10000000/8388608 = 1.1920928955. - // We can approximate this using multiply-adds and right-shifts using the - // values in this array. The 1. portion of the number string is printed out - // in a fixed way before the fraction, below. 
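A quick numeric check of the 10000000/8388608 scale factor discussed in the comment above; the shift table it approximates appears at the start of the next hunk. This is a standalone sketch, not part of the deleted file.

```cpp
// Sketch only: verifies that 1 + sum(2^-k) over the shift table approximates
// 10000000/8388608 (about 1.1920928955), as the deleted comment claims.
#include <cstdio>

int main() {
  const int shifts[13] = {3, 4, 8, 11, 13, 14, 17, 18, 19, 20, 21, 22, 23};
  double scale = 1.0;  // the leading "1." printed before the fraction digits
  for (int s : shifts) scale += 1.0 / (1u << s);
  std::printf("approx = %.10f, target = %.10f\n", scale, 10000000.0 / 8388608.0);
  // approx = 1.1920927763 vs target = 1.1920928955: agreement to roughly 1e-7,
  // which is enough for the 7 fractional digits the formatter prints.
  return 0;
}
```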
- const int32_t scale_shifts_size = 13; - const int8_t scale_shifts[13] = {3, 4, 8, 11, 13, 14, 17, - 18, 19, 20, 21, 22, 23}; - uint32_t scaled_fraction = fraction; - for (int i = 0; i < scale_shifts_size; ++i) { - scaled_fraction += (fraction >> scale_shifts[i]); - } - *current = '1'; - current += 1; - *current = '.'; - current += 1; - *current = 0; - - // Prepend leading zeros to fill in all 7 bytes of the fraction. Truncate - // zeros off the end of the fraction. Every fractional value takes 7 bytes. - // For example, 2500 would be written into the buffer as 0002500 since it - // represents .00025. - constexpr int kMaxFractionalDigits = 7; - - // Abort early if there is not enough space in the buffer. - if (current_end - current <= kMaxFractionalDigits) { - return current; - } - - // Pre-fill buffer with zeros to ensure zero-truncation works properly. - for (int i = 1; i < kMaxFractionalDigits; i++) { - *(current + i) = '0'; - } - - // Track how large the fraction is to add leading zeros. - char* previous = current; - current = StrCatUInt32(current, (current_end - current), scaled_fraction, 10); - int fraction_digits = current - previous; - int leading_zeros = kMaxFractionalDigits - fraction_digits; - - // Overwrite the null terminator from StrCatUInt32 to ensure zero-trunctaion - // works properly. - *current = '0'; - - // Shift fraction values and prepend zeros if necessary. - if (leading_zeros != 0) { - for (int i = 0; i < fraction_digits; i++) { - current--; - *(current + leading_zeros) = *current; - *current = '0'; - } - current += kMaxFractionalDigits; - } - - // Truncate trailing zeros for cleaner logs. Ensure we leave at least one - // fractional character for the case when scaled_fraction is 0. - while (*(current - 1) == '0' && (current - 1) > previous) { - current--; - } - *current = 0; - current = StrCatStr(current, (current_end - current), "*2^"); - current = StrCatInt32(current, (current_end - current), exponent); - return current; -} - -int FormatInt32(char* output, int32_t i) { - return static_cast(FastInt32ToBufferLeft(i, output) - output); -} - -int FormatUInt32(char* output, uint32_t i) { - return static_cast(FastUInt32ToBufferLeft(i, output, 10) - output); -} - -int FormatHex(char* output, uint32_t i) { - return static_cast(FastUInt32ToBufferLeft(i, output, 16) - output); -} - -int FormatFloat(char* output, float i) { - return static_cast(FastFloatToBufferLeft(i, output) - output); -} - -} // namespace - -extern "C" int MicroVsnprintf(char* output, int len, const char* format, - va_list args) { - int output_index = 0; - const char* current = format; - // One extra character must be left for the null terminator. - const int usable_length = len - 1; - while (*current != '\0' && output_index < usable_length) { - if (*current == '%') { - current++; - switch (*current) { - case 'd': - // Cut off log message if format could exceed log buffer length. 
- if (usable_length - output_index < kMaxIntCharsNeeded) { - output[output_index++] = '\0'; - return output_index; - } - output_index += - FormatInt32(&output[output_index], va_arg(args, int32_t)); - current++; - break; - case 'u': - if (usable_length - output_index < kMaxIntCharsNeeded) { - output[output_index++] = '\0'; - return output_index; - } - output_index += - FormatUInt32(&output[output_index], va_arg(args, uint32_t)); - current++; - break; - case 'x': - if (usable_length - output_index < kMaxHexCharsNeeded) { - output[output_index++] = '\0'; - return output_index; - } - output[output_index++] = '0'; - output[output_index++] = 'x'; - output_index += - FormatHex(&output[output_index], va_arg(args, uint32_t)); - current++; - break; - case 'f': - if (usable_length - output_index < kMaxFloatCharsNeeded) { - output[output_index++] = '\0'; - return output_index; - } - output_index += - FormatFloat(&output[output_index], va_arg(args, double)); - current++; - break; - case '%': - output[output_index++] = *current++; - break; - case 's': - char* string = va_arg(args, char*); - int string_idx = 0; - while (string_idx + output_index < usable_length && - string[string_idx] != '\0') { - output[output_index++] = string[string_idx++]; - } - current++; - } - } else { - output[output_index++] = *current++; - } - } - output[output_index++] = '\0'; - return output_index; -} - -extern "C" int MicroSnprintf(char* output, int len, const char* format, ...) { - va_list args; - va_start(args, format); - int bytes_written = MicroVsnprintf(output, len, format, args); - va_end(args); - return bytes_written; -} diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_string.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_string.h deleted file mode 100644 index 59303e82..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_string.h +++ /dev/null @@ -1,33 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ - -#include - -// Implements simple string formatting for numeric types. Returns the number of -// bytes written to output. -extern "C" { -// Functionally equivalent to vsnprintf, trimmed down for TFLite Micro. -// MicroSnprintf() is implemented using MicroVsnprintf(). -int MicroVsnprintf(char* output, int len, const char* format, va_list args); -// Functionally equavalent to snprintf, trimmed down for TFLite Micro. -// For example, MicroSnprintf(buffer, 10, "int %d", 10) will put the string -// "int 10" in the buffer. -// Floating point values are logged in exponent notation (1.XXX*2^N). 
-int MicroSnprintf(char* output, int len, const char* format, ...); -} - -#endif // TENSORFLOW_LITE_MICRO_MICRO_STRING_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_time.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_time.cc deleted file mode 100644 index d7c51f90..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_time.cc +++ /dev/null @@ -1,59 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Reference implementation of timer functions. Platforms are not required to -// implement these timer methods, but they are required to enable profiling. - -// On platforms that have a POSIX stack or C library, it can be written using -// methods from or clock() from . - -// To add an equivalent function for your own platform, create your own -// implementation file, and place it in a subfolder with named after the OS -// you're targeting. For example, see the Cortex M bare metal version in -// tensorflow/lite/micro/bluepill/micro_time.cc or the mbed one on -// tensorflow/lite/micro/mbed/micro_time.cc. - -#include "tensorflow/lite/micro/micro_time.h" - -#if defined(TF_LITE_USE_CTIME) -#include -#endif - -namespace tflite { - -#if !defined(TF_LITE_USE_CTIME) - -// Reference implementation of the ticks_per_second() function that's required -// for a platform to support Tensorflow Lite for Microcontrollers profiling. -// This returns 0 by default because timing is an optional feature that builds -// without errors on platforms that do not need it. -int32_t ticks_per_second() { return 0; } - -// Reference implementation of the GetCurrentTimeTicks() function that's -// required for a platform to support Tensorflow Lite for Microcontrollers -// profiling. This returns 0 by default because timing is an optional feature -// that builds without errors on platforms that do not need it. -int32_t GetCurrentTimeTicks() { return 0; } - -#else // defined(TF_LITE_USE_CTIME) - -// For platforms that support ctime, we implment the micro_time interface in -// this central location. -int32_t ticks_per_second() { return CLOCKS_PER_SEC; } - -int32_t GetCurrentTimeTicks() { return clock(); } -#endif - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_time.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_time.h deleted file mode 100644 index 465490a8..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_time.h +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
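Returning to the micro_string API whose header is deleted above, a small usage sketch of MicroSnprintf. The expected outputs in the comments follow the integer, hex and power-of-two float formatting implemented in the deleted micro_string.cc; the float digits were worked out by hand and are indicative only.

```cpp
// Sketch only: calls the MicroSnprintf declared in the deleted micro_string.h.
#include <cstdio>

#include "tensorflow/lite/micro/micro_string.h"

int main() {
  char buffer[64];
  MicroSnprintf(buffer, 64, "int %d", 10);     // -> "int 10" (per the header comment)
  std::printf("%s\n", buffer);
  MicroSnprintf(buffer, 64, "hex %x", 255u);   // -> "hex 0xff" ('0x' prefix added by '%x')
  std::printf("%s\n", buffer);
  MicroSnprintf(buffer, 64, "float %f", 1.5);  // -> "float 1.4999999*2^0" (1.XXX*2^N notation)
  std::printf("%s\n", buffer);
  return 0;
}
```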
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ - -#include - -namespace tflite { - -// These functions should be implemented by each target platform, and provide an -// accurate tick count along with how many ticks there are per second. -int32_t ticks_per_second(); - -// Return time in ticks. The meaning of a tick varies per platform. -int32_t GetCurrentTimeTicks(); - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_MICRO_TIME_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_utils.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_utils.cc deleted file mode 100644 index 96152364..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_utils.cc +++ /dev/null @@ -1,80 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/micro_utils.h" - -#include -#include -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/op_macros.h" - -namespace tflite { - -int ElementCount(const TfLiteIntArray& dims) { - int result = 1; - for (int i = 0; i < dims.size; ++i) { - result *= dims.data[i]; - } - return result; -} - -void SignedSymmetricPerChannelQuantize(const float* values, - TfLiteIntArray* dims, - int quantized_dimension, - int8_t* quantized_values, - float* scaling_factors) { - int input_size = ElementCount(*dims); - int channel_count = dims->data[quantized_dimension]; - int per_channel_size = input_size / channel_count; - - int stride; - int channel_stride; - if (quantized_dimension == 0) { - stride = 1; - channel_stride = per_channel_size; - } else if (quantized_dimension == 3) { - stride = channel_count; - channel_stride = 1; - } else { - TF_LITE_FATAL("quantized dimension must be 0 or 3"); - } - - // Calculate scales for each channel. 
- for (int channel = 0; channel < channel_count; channel++) { - float min = 0; - float max = 0; - - for (int i = 0; i < per_channel_size; i++) { - int idx = channel * channel_stride + i * stride; - min = fminf(min, values[idx]); - max = fmaxf(max, values[idx]); - } - scaling_factors[channel] = - fmaxf(fabs(min), fabs(max)) / std::numeric_limits::max(); - for (int i = 0; i < per_channel_size; i++) { - int idx = channel * channel_stride + i * stride; - const int32_t quantized_value = - static_cast(roundf(values[idx] / scaling_factors[channel])); - // Clamp: just in case some odd numeric offset. - quantized_values[idx] = - fminf(std::numeric_limits::max(), - fmaxf(std::numeric_limits::min() + 1, quantized_value)); - } - } -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_utils.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_utils.h deleted file mode 100644 index b9a3121a..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/micro_utils.h +++ /dev/null @@ -1,134 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_ - -#include -#include -#include - -#include "tensorflow/lite/c/common.h" - -namespace tflite { - -// Returns number of elements in the shape array. - -int ElementCount(const TfLiteIntArray& dims); - -// Converts a float value into a quantized value. Note that large values (close -// to max int and min int) may see significant error due to a lack of floating -// point granularity for large values. -template -T FloatToQuantizedType(const float value, const float scale, int zero_point) { - int32_t result = round(value / scale) + zero_point; - result = - std::max(static_cast(std::numeric_limits::min()), result); - result = - std::min(static_cast(std::numeric_limits::max()), result); - return result; -} - -template -T FloatToSymmetricQuantizedType(const float value, const float scale) { - int32_t result = round(value / scale); - result = - std::max(static_cast(std::numeric_limits::min() + 1), result); - result = - std::min(static_cast(std::numeric_limits::max()), result); - return result; -} - -// Helper methods to quantize arrays of floats to the desired format. 
-// -// There are several key flavors of quantization in TfLite: -// asymmetric symmetric per channel -// int8_t | X | X | X | -// uint8_t | X | X | | -// int16_t | X | | | -// int32_t | | X | X | -// -// The per-op quantization spec can be found here: -// https://www.tensorflow.org/lite/performance/quantization_spec -template -void Quantize(const float* input, T* output, int num_elements, float scale, - int zero_point) { - for (int i = 0; i < num_elements; i++) { - output[i] = FloatToQuantizedType(input[i], scale, zero_point); - } -} - -template -void SymmetricQuantize(const float* input, T* output, int num_elements, - float scale) { - for (int i = 0; i < num_elements; i++) { - output[i] = FloatToSymmetricQuantizedType(input[i], scale); - } -} - -template -void SymmetricPerChannelQuantize(const float* input, T* output, - int num_elements, int num_channels, - float* scales) { - int elements_per_channel = num_elements / num_channels; - for (int i = 0; i < num_channels; i++) { - for (int j = 0; j < elements_per_channel; j++) { - output[i * elements_per_channel + j] = FloatToSymmetricQuantizedType( - input[i * elements_per_channel + j], scales[i]); - } - } -} - -void SignedSymmetricPerChannelQuantize(const float* values, - TfLiteIntArray* dims, - int quantized_dimension, - int8_t* quantized_values, - float* scaling_factor); - -// Quantizes inputs based on the values provided, choosing the smallest range -// which includes all input values. -template -void SymmetricQuantizeCalculateScales(const float* values, TfLiteIntArray* dims, - T* output, float* scale) { - int input_size = ElementCount(*dims); - - float min = 0; - float max = 0; - for (int i = 0; i < input_size; i++) { - min = fminf(min, values[i]); - max = fmaxf(max, values[i]); - } - *scale = fmaxf(std::abs(min), std::abs(max)) / std::numeric_limits::max(); - for (int i = 0; i < input_size; i++) { - const int32_t quantized_value = - static_cast(roundf(values[i] / *scale)); - // Clamp: just in case some odd numeric offset. - quantized_value = fminf(std::numeric_limits::max(), quantized_value); - quantized_value = fmaxf(std::numeric_limits::min() + 1, quantized_value); - output[i] = quantized_value; - } -} - -template -void Dequantize(const T* values, const int size, const float scale, - int zero_point, float* dequantized_values) { - for (int i = 0; i < size; ++i) { - dequantized_values[i] = (values[i] - zero_point) * scale; - } -} - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_micro_allocator.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_micro_allocator.cc deleted file mode 100644 index 6bb52974..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_micro_allocator.cc +++ /dev/null @@ -1,244 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
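A hedged example of the Quantize/Dequantize helper templates deleted above (micro_utils.h); the scale and zero point are arbitrary values chosen to map [-1, 1] onto int8_t, not anything prescribed by the patch.

```cpp
// Sketch only: uses the affine quantization helpers from the deleted micro_utils.h.
#include <cstdio>

#include "tensorflow/lite/micro/micro_utils.h"

int main() {
  const float input[4] = {-1.0f, -0.5f, 0.5f, 1.0f};
  int8_t quantized[4];
  float roundtrip[4];
  const float scale = 1.0f / 127.0f;  // arbitrary: maps [-1, 1] onto the int8_t range
  const int zero_point = 0;

  // q = round(v / scale) + zero_point, clamped to the int8_t limits.
  tflite::Quantize(input, quantized, 4, scale, zero_point);
  // v' = (q - zero_point) * scale recovers the value up to quantization error.
  tflite::Dequantize(quantized, 4, scale, zero_point, roundtrip);

  for (int i = 0; i < 4; ++i) {
    std::printf("%f -> %d -> %f\n", input[i], quantized[i], roundtrip[i]);
  }
  return 0;
}
```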
-==============================================================================*/ - -#include "tensorflow/lite/micro/recording_micro_allocator.h" - -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/micro_allocator.h" -#include "tensorflow/lite/micro/recording_simple_memory_allocator.h" - -namespace tflite { - -RecordingMicroAllocator::RecordingMicroAllocator( - RecordingSimpleMemoryAllocator* recording_memory_allocator, - ErrorReporter* error_reporter) - : MicroAllocator(recording_memory_allocator, error_reporter), - recording_memory_allocator_(recording_memory_allocator) {} - -RecordingMicroAllocator* RecordingMicroAllocator::Create( - uint8_t* tensor_arena, size_t arena_size, ErrorReporter* error_reporter) { - TFLITE_DCHECK(error_reporter != nullptr); - - RecordingSimpleMemoryAllocator* simple_memory_allocator = - RecordingSimpleMemoryAllocator::Create(error_reporter, tensor_arena, - arena_size); - TFLITE_DCHECK(simple_memory_allocator != nullptr); - - uint8_t* allocator_buffer = simple_memory_allocator->AllocateFromTail( - sizeof(RecordingMicroAllocator), alignof(RecordingMicroAllocator)); - RecordingMicroAllocator* allocator = new (allocator_buffer) - RecordingMicroAllocator(simple_memory_allocator, error_reporter); - return allocator; -} - -RecordedAllocation RecordingMicroAllocator::GetRecordedAllocation( - RecordedAllocationType allocation_type) const { - switch (allocation_type) { - case RecordedAllocationType::kTfLiteEvalTensorData: - return recorded_tflite_eval_tensor_data_; - case RecordedAllocationType::kPersistentTfLiteTensorData: - return recorded_persistent_tflite_tensor_data_; - case RecordedAllocationType::kPersistentTfLiteTensorQuantizationData: - return recorded_persistent_tflite_tensor_quantization_data_; - case RecordedAllocationType::kPersistentBufferData: - return recorded_persistent_buffer_data_; - case RecordedAllocationType::kTfLiteTensorVariableBufferData: - return recorded_tflite_tensor_variable_buffer_data_; - case RecordedAllocationType::kNodeAndRegistrationArray: - return recorded_node_and_registration_array_data_; - case RecordedAllocationType::kOpData: - return recorded_op_data_; - } - TF_LITE_REPORT_ERROR(error_reporter(), "Invalid allocation type supplied: %d", - allocation_type); - return RecordedAllocation(); -} - -const RecordingSimpleMemoryAllocator* -RecordingMicroAllocator::GetSimpleMemoryAllocator() const { - return recording_memory_allocator_; -} - -void RecordingMicroAllocator::PrintAllocations() const { - TF_LITE_REPORT_ERROR( - error_reporter(), - "[RecordingMicroAllocator] Arena allocation total %d bytes", - recording_memory_allocator_->GetUsedBytes()); - TF_LITE_REPORT_ERROR( - error_reporter(), - "[RecordingMicroAllocator] Arena allocation head %d bytes", - recording_memory_allocator_->GetHeadUsedBytes()); - TF_LITE_REPORT_ERROR( - error_reporter(), - "[RecordingMicroAllocator] Arena allocation tail %d bytes", - recording_memory_allocator_->GetTailUsedBytes()); - PrintRecordedAllocation(RecordedAllocationType::kTfLiteEvalTensorData, - "TfLiteEvalTensor data", "allocations"); - PrintRecordedAllocation(RecordedAllocationType::kPersistentTfLiteTensorData, - "Persistent TfLiteTensor data", "tensors"); - PrintRecordedAllocation( - RecordedAllocationType::kPersistentTfLiteTensorQuantizationData, - "Persistent TfLiteTensor quantization data", "allocations"); - 
PrintRecordedAllocation(RecordedAllocationType::kPersistentBufferData, - "Persistent buffer data", "allocations"); - PrintRecordedAllocation( - RecordedAllocationType::kTfLiteTensorVariableBufferData, - "TfLiteTensor variable buffer data", "allocations"); - PrintRecordedAllocation(RecordedAllocationType::kNodeAndRegistrationArray, - "NodeAndRegistration struct", - "NodeAndRegistration structs"); - PrintRecordedAllocation(RecordedAllocationType::kOpData, - "Operator runtime data", "OpData structs"); -} - -void* RecordingMicroAllocator::AllocatePersistentBuffer(size_t bytes) { - RecordedAllocation allocations = SnapshotAllocationUsage(); - void* buffer = MicroAllocator::AllocatePersistentBuffer(bytes); - RecordAllocationUsage(allocations, recorded_persistent_buffer_data_); - - return buffer; -} - -void RecordingMicroAllocator::PrintRecordedAllocation( - RecordedAllocationType allocation_type, const char* allocation_name, - const char* allocation_description) const { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - RecordedAllocation allocation = GetRecordedAllocation(allocation_type); - if (allocation.used_bytes > 0 || allocation.requested_bytes > 0) { - TF_LITE_REPORT_ERROR( - error_reporter(), - "[RecordingMicroAllocator] '%s' used %d bytes with alignment overhead " - "(requested %d bytes for %d %s)", - allocation_name, allocation.used_bytes, allocation.requested_bytes, - allocation.count, allocation_description); - } -#endif -} - -TfLiteStatus RecordingMicroAllocator::AllocateNodeAndRegistrations( - const Model* model, NodeAndRegistration** node_and_registrations) { - RecordedAllocation allocations = SnapshotAllocationUsage(); - - TfLiteStatus status = MicroAllocator::AllocateNodeAndRegistrations( - model, node_and_registrations); - - RecordAllocationUsage(allocations, - recorded_node_and_registration_array_data_); - // The allocation count in SimpleMemoryAllocator will only be 1. To provide - // better logging, decrement by 1 and add in the actual number of operators - // used in the graph: - // The allocation for this recording will always be 1. This is because the - // parent class mallocs one large allocation for the number of nodes in the - // graph (e.g. sizeof(NodeAndRegistration) * num_nodes). - // To prevent extra overhead and potential for fragmentation, manually adjust - // the accounting by decrementing by 1 and adding the actual number of nodes - // used in the graph: - recorded_node_and_registration_array_data_.count += - GetSubGraphFromModel(model)->operators()->size() - 1; - return status; -} - -TfLiteStatus -RecordingMicroAllocator::PrepareNodeAndRegistrationDataFromFlatbuffer( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration* node_and_registrations) { - RecordedAllocation allocations = SnapshotAllocationUsage(); - - TfLiteStatus status = - MicroAllocator::PrepareNodeAndRegistrationDataFromFlatbuffer( - model, op_resolver, node_and_registrations); - - RecordAllocationUsage(allocations, recorded_op_data_); - return status; -} - -TfLiteStatus RecordingMicroAllocator::AllocateTfLiteEvalTensors( - const Model* model, TfLiteEvalTensor** eval_tensors) { - RecordedAllocation allocations = SnapshotAllocationUsage(); - - TfLiteStatus status = - MicroAllocator::AllocateTfLiteEvalTensors(model, eval_tensors); - - RecordAllocationUsage(allocations, recorded_tflite_eval_tensor_data_); - // The allocation for this recording will always be 1. This is because the - // parent class mallocs one large allocation for the number of tensors in the - // graph (e.g. 
sizeof(TfLiteEvalTensor) * num_tensors). - // To prevent extra overhead and potential for fragmentation, manually adjust - // the accounting by decrementing by 1 and adding the actual number of tensors - // used in the graph: - recorded_tflite_eval_tensor_data_.count += - GetSubGraphFromModel(model)->tensors()->size() - 1; - return status; -} - -TfLiteStatus RecordingMicroAllocator::AllocateVariables( - const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors) { - RecordedAllocation allocations = SnapshotAllocationUsage(); - - TfLiteStatus status = - MicroAllocator::AllocateVariables(subgraph, eval_tensors); - - RecordAllocationUsage(allocations, - recorded_tflite_tensor_variable_buffer_data_); - return status; -} - -TfLiteTensor* RecordingMicroAllocator::AllocatePersistentTfLiteTensorInternal( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index) { - RecordedAllocation allocations = SnapshotAllocationUsage(); - - TfLiteTensor* result = MicroAllocator::AllocatePersistentTfLiteTensorInternal( - model, eval_tensors, tensor_index); - - RecordAllocationUsage(allocations, recorded_persistent_tflite_tensor_data_); - return result; -} - -TfLiteStatus RecordingMicroAllocator::PopulateTfLiteTensorFromFlatbuffer( - const Model* model, const SubGraph* subgraph, TfLiteTensor* tensor, - int tensor_index, bool allocate_temp) { - RecordedAllocation allocations = SnapshotAllocationUsage(); - - TfLiteStatus status = MicroAllocator::PopulateTfLiteTensorFromFlatbuffer( - model, subgraph, tensor, tensor_index, allocate_temp); - - RecordAllocationUsage(allocations, - recorded_persistent_tflite_tensor_quantization_data_); - return status; -} - -RecordedAllocation RecordingMicroAllocator::SnapshotAllocationUsage() const { - return {/*requested_bytes=*/recording_memory_allocator_->GetRequestedBytes(), - /*used_bytes=*/recording_memory_allocator_->GetUsedBytes(), - /*count=*/recording_memory_allocator_->GetAllocatedCount()}; -} - -void RecordingMicroAllocator::RecordAllocationUsage( - const RecordedAllocation& snapshotted_allocation, - RecordedAllocation& recorded_allocation) { - recorded_allocation.requested_bytes += - recording_memory_allocator_->GetRequestedBytes() - - snapshotted_allocation.requested_bytes; - recorded_allocation.used_bytes += - recording_memory_allocator_->GetUsedBytes() - - snapshotted_allocation.used_bytes; - recorded_allocation.count += - recording_memory_allocator_->GetAllocatedCount() - - snapshotted_allocation.count; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_micro_allocator.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_micro_allocator.h deleted file mode 100644 index 47246e17..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_micro_allocator.h +++ /dev/null @@ -1,125 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
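Before the matching header continues below, a sketch of driving the RecordingMicroAllocator deleted above on its own. The arena size is arbitrary, and MicroErrorReporter comes from micro_error_reporter.h, which is not part of this hunk.

```cpp
// Sketch only: creates the recording allocator from the deleted
// recording_micro_allocator.cc and reads back one allocation bucket.
#include <cstddef>
#include <cstdint>

#include "tensorflow/lite/micro/micro_error_reporter.h"  // outside this hunk
#include "tensorflow/lite/micro/recording_micro_allocator.h"

constexpr size_t kArenaSize = 8 * 1024;  // illustrative arena size
alignas(16) static uint8_t tensor_arena[kArenaSize];

void InspectArena() {
  static tflite::MicroErrorReporter micro_error_reporter;
  tflite::RecordingMicroAllocator* allocator =
      tflite::RecordingMicroAllocator::Create(tensor_arena, kArenaSize,
                                              &micro_error_reporter);
  // Per-bucket recordings become meaningful once model allocation has run.
  tflite::RecordedAllocation evals = allocator->GetRecordedAllocation(
      tflite::RecordedAllocationType::kTfLiteEvalTensorData);
  // Logs total/head/tail arena usage plus each RecordedAllocationType bucket.
  allocator->PrintAllocations();
  (void)evals;
}
```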
-==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_RECORDING_MICRO_ALLOCATOR_H_ -#define TENSORFLOW_LITE_MICRO_RECORDING_MICRO_ALLOCATOR_H_ - -#include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/micro_allocator.h" -#include "tensorflow/lite/micro/recording_simple_memory_allocator.h" - -namespace tflite { - -// List of buckets currently recorded by this class. Each type keeps a list of -// allocated information during model initialization. -// TODO(b/169834511): Add tracking for scratch buffer allocations. -enum class RecordedAllocationType { - kTfLiteEvalTensorData, - kPersistentTfLiteTensorData, - kPersistentTfLiteTensorQuantizationData, - kPersistentBufferData, - kTfLiteTensorVariableBufferData, - kNodeAndRegistrationArray, - kOpData, -}; - -// Container for holding information about allocation recordings by a given -// type. Each recording contains the number of bytes requested, the actual bytes -// allocated (can defer from requested by alignment), and the number of items -// allocated. -struct RecordedAllocation { - size_t requested_bytes; - size_t used_bytes; - size_t count; -}; - -// Utility subclass of MicroAllocator that records all allocations -// inside the arena. A summary of allocations can be logged through the -// ErrorReporter by invoking LogAllocations(). This special allocator requires -// an instance of RecordingSimpleMemoryAllocator to capture allocations in the -// head and tail. Arena allocation recording can be retrieved by type through -// the GetRecordedAllocation() function. This class should only be used for -// auditing memory usage or integration testing. -class RecordingMicroAllocator : public MicroAllocator { - public: - static RecordingMicroAllocator* Create(uint8_t* tensor_arena, - size_t arena_size, - ErrorReporter* error_reporter); - - // Returns the recorded allocations information for a given allocation type. - RecordedAllocation GetRecordedAllocation( - RecordedAllocationType allocation_type) const; - - const RecordingSimpleMemoryAllocator* GetSimpleMemoryAllocator() const; - - // Logs out through the ErrorReporter all allocation recordings by type - // defined in RecordedAllocationType. - void PrintAllocations() const; - - void* AllocatePersistentBuffer(size_t bytes) override; - - protected: - TfLiteStatus AllocateNodeAndRegistrations( - const Model* model, - NodeAndRegistration** node_and_registrations) override; - TfLiteStatus PrepareNodeAndRegistrationDataFromFlatbuffer( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration* node_and_registrations) override; - TfLiteStatus AllocateTfLiteEvalTensors( - const Model* model, TfLiteEvalTensor** eval_tensors) override; - TfLiteStatus AllocateVariables(const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors) override; - // TODO(b/162311891): Once all kernels have been updated to the new API drop - // this method. It is only used to record TfLiteTensor persistent allocations. - TfLiteTensor* AllocatePersistentTfLiteTensorInternal( - const Model* model, TfLiteEvalTensor* eval_tensors, - int tensor_index) override; - // TODO(b/162311891): Once all kernels have been updated to the new API drop - // this function since all allocations for quantized data will take place in - // the temp section. 
- TfLiteStatus PopulateTfLiteTensorFromFlatbuffer(const Model* model, - const SubGraph* subgraph, - TfLiteTensor* tensor, - int tensor_index, - bool allocate_temp) override; - - private: - RecordingMicroAllocator(RecordingSimpleMemoryAllocator* memory_allocator, - ErrorReporter* error_reporter); - - void PrintRecordedAllocation(RecordedAllocationType allocation_type, - const char* allocation_name, - const char* allocation_description) const; - - RecordedAllocation SnapshotAllocationUsage() const; - void RecordAllocationUsage(const RecordedAllocation& snapshotted_allocation, - RecordedAllocation& recorded_allocation); - - const RecordingSimpleMemoryAllocator* recording_memory_allocator_; - - RecordedAllocation recorded_tflite_eval_tensor_data_ = {}; - RecordedAllocation recorded_persistent_tflite_tensor_data_ = {}; - RecordedAllocation recorded_persistent_tflite_tensor_quantization_data_ = {}; - RecordedAllocation recorded_persistent_buffer_data_ = {}; - RecordedAllocation recorded_tflite_tensor_variable_buffer_data_ = {}; - RecordedAllocation recorded_node_and_registration_array_data_ = {}; - RecordedAllocation recorded_op_data_ = {}; - - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_RECORDING_MICRO_ALLOCATOR_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_micro_interpreter.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_micro_interpreter.h deleted file mode 100644 index 0a579b0b..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_micro_interpreter.h +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_RECORDING_MICRO_INTERPRETER_H_ -#define TENSORFLOW_LITE_MICRO_RECORDING_MICRO_INTERPRETER_H_ - -#include "tensorflow/lite/micro/micro_interpreter.h" -#include "tensorflow/lite/micro/recording_micro_allocator.h" - -namespace tflite { - -// Utility subclass that enables internal recordings of the MicroInterpreter. -// This class should be used to audit and analyze memory arena usage for a given -// model and interpreter. -// -// After construction and the first Invoke() or AllocateTensors() call - the -// memory usage is recorded and available through the GetMicroAllocator() -// function. See RecordingMicroAlloctor for more details on what is currently -// recorded from arena allocations. -// -// It is recommended for users to increase the tensor arena size by at least 1kb -// to ensure enough additional memory is available for internal recordings. 
-class RecordingMicroInterpreter : public MicroInterpreter { - public: - RecordingMicroInterpreter(const Model* model, - const MicroOpResolver& op_resolver, - uint8_t* tensor_arena, size_t tensor_arena_size, - ErrorReporter* error_reporter) - : MicroInterpreter(model, op_resolver, - RecordingMicroAllocator::Create( - tensor_arena, tensor_arena_size, error_reporter), - error_reporter), - recording_micro_allocator_( - static_cast(allocator())) {} - - RecordingMicroInterpreter(const Model* model, - const MicroOpResolver& op_resolver, - RecordingMicroAllocator* allocator, - ErrorReporter* error_reporter) - : MicroInterpreter(model, op_resolver, allocator, error_reporter), - recording_micro_allocator_(*allocator) {} - - const RecordingMicroAllocator& GetMicroAllocator() const { - return recording_micro_allocator_; - } - - private: - const RecordingMicroAllocator& recording_micro_allocator_; -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_RECORDING_MICRO_INTERPRETER_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_simple_memory_allocator.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_simple_memory_allocator.cc deleted file mode 100644 index ef30aca4..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_simple_memory_allocator.cc +++ /dev/null @@ -1,84 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/recording_simple_memory_allocator.h" - -#include - -#include "tensorflow/lite/kernels/internal/compatibility.h" - -namespace tflite { - -RecordingSimpleMemoryAllocator::RecordingSimpleMemoryAllocator( - ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size) - : SimpleMemoryAllocator(error_reporter, buffer_head, buffer_size), - requested_head_bytes_(0), - requested_tail_bytes_(0), - used_bytes_(0), - alloc_count_(0) {} - -RecordingSimpleMemoryAllocator::~RecordingSimpleMemoryAllocator() {} - -RecordingSimpleMemoryAllocator* RecordingSimpleMemoryAllocator::Create( - ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size) { - TFLITE_DCHECK(error_reporter != nullptr); - TFLITE_DCHECK(buffer_head != nullptr); - RecordingSimpleMemoryAllocator tmp = - RecordingSimpleMemoryAllocator(error_reporter, buffer_head, buffer_size); - - uint8_t* allocator_buffer = - tmp.AllocateFromTail(sizeof(RecordingSimpleMemoryAllocator), - alignof(RecordingSimpleMemoryAllocator)); - // Use the default copy constructor to populate internal states. 
- return new (allocator_buffer) RecordingSimpleMemoryAllocator(tmp); -} - -size_t RecordingSimpleMemoryAllocator::GetRequestedBytes() const { - return requested_head_bytes_ + requested_tail_bytes_; -} - -size_t RecordingSimpleMemoryAllocator::GetUsedBytes() const { - return used_bytes_; -} - -size_t RecordingSimpleMemoryAllocator::GetAllocatedCount() const { - return alloc_count_; -} - -TfLiteStatus RecordingSimpleMemoryAllocator::SetHeadBufferSize( - size_t size, size_t alignment) { - const uint8_t* previous_head = head(); - TfLiteStatus status = - SimpleMemoryAllocator::SetHeadBufferSize(size, alignment); - if (status == kTfLiteOk) { - used_bytes_ += head() - previous_head; - requested_head_bytes_ = size; - } - return status; -} - -uint8_t* RecordingSimpleMemoryAllocator::AllocateFromTail(size_t size, - size_t alignment) { - const uint8_t* previous_tail = tail(); - uint8_t* result = SimpleMemoryAllocator::AllocateFromTail(size, alignment); - if (result != nullptr) { - used_bytes_ += previous_tail - tail(); - requested_tail_bytes_ += size; - alloc_count_++; - } - return result; -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_simple_memory_allocator.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_simple_memory_allocator.h deleted file mode 100644 index 3526716e..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/recording_simple_memory_allocator.h +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_ -#define TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_ - -#include "tensorflow/lite/micro/compatibility.h" -#include "tensorflow/lite/micro/simple_memory_allocator.h" - -namespace tflite { - -// Utility class used to log allocations of a SimpleMemoryAllocator. Should only -// be used in debug/evaluation settings or unit tests to evaluate allocation -// usage. -class RecordingSimpleMemoryAllocator : public SimpleMemoryAllocator { - public: - RecordingSimpleMemoryAllocator(ErrorReporter* error_reporter, - uint8_t* buffer_head, size_t buffer_size); - // TODO(b/157615197): Cleanup constructors/destructor and use factory - // functions. - ~RecordingSimpleMemoryAllocator() override; - - static RecordingSimpleMemoryAllocator* Create(ErrorReporter* error_reporter, - uint8_t* buffer_head, - size_t buffer_size); - - // Returns the number of bytes requested from the head or tail. - size_t GetRequestedBytes() const; - - // Returns the number of bytes actually allocated from the head or tail. This - // value will be >= to the number of requested bytes due to padding and - // alignment. - size_t GetUsedBytes() const; - - // Returns the number of alloc calls from the head or tail. 
- size_t GetAllocatedCount() const; - - TfLiteStatus SetHeadBufferSize(size_t size, size_t alignment) override; - uint8_t* AllocateFromTail(size_t size, size_t alignment) override; - - private: - size_t requested_head_bytes_; - size_t requested_tail_bytes_; - size_t used_bytes_; - size_t alloc_count_; - - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/simple_memory_allocator.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/simple_memory_allocator.cc deleted file mode 100644 index 08b6789e..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/simple_memory_allocator.cc +++ /dev/null @@ -1,149 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/simple_memory_allocator.h" - -#include -#include -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/micro/memory_helpers.h" - -namespace tflite { - -SimpleMemoryAllocator::SimpleMemoryAllocator(ErrorReporter* error_reporter, - uint8_t* buffer_head, - uint8_t* buffer_tail) - : error_reporter_(error_reporter), - buffer_head_(buffer_head), - buffer_tail_(buffer_tail), - head_(buffer_head), - tail_(buffer_tail), - temp_(buffer_head_) {} - -SimpleMemoryAllocator::SimpleMemoryAllocator(ErrorReporter* error_reporter, - uint8_t* buffer, - size_t buffer_size) - : SimpleMemoryAllocator(error_reporter, buffer, buffer + buffer_size) {} - -/* static */ -SimpleMemoryAllocator* SimpleMemoryAllocator::Create( - ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size) { - TFLITE_DCHECK(error_reporter != nullptr); - TFLITE_DCHECK(buffer_head != nullptr); - SimpleMemoryAllocator tmp = - SimpleMemoryAllocator(error_reporter, buffer_head, buffer_size); - - // Allocate enough bytes from the buffer to create a SimpleMemoryAllocator. - // The new instance will use the current adjusted tail buffer from the tmp - // allocator instance. - uint8_t* allocator_buffer = tmp.AllocateFromTail( - sizeof(SimpleMemoryAllocator), alignof(SimpleMemoryAllocator)); - // Use the default copy constructor to populate internal states. 
- return new (allocator_buffer) SimpleMemoryAllocator(tmp); -} - -SimpleMemoryAllocator::~SimpleMemoryAllocator() {} - -TfLiteStatus SimpleMemoryAllocator::SetHeadBufferSize(size_t size, - size_t alignment) { - if (head_ != temp_) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Internal error: SetHeadBufferSize() needs to be called " - "after ResetTempAllocations()."); - return kTfLiteError; - } - - uint8_t* const aligned_result = AlignPointerUp(buffer_head_, alignment); - const size_t available_memory = tail_ - aligned_result; - if (available_memory < size) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to set head size. Requested: %u, available %u, missing: %u", - size, available_memory, size - available_memory); - return kTfLiteError; - } - head_ = aligned_result + size; - temp_ = head_; - - return kTfLiteOk; -} - -uint8_t* SimpleMemoryAllocator::AllocateFromTail(size_t size, - size_t alignment) { - uint8_t* const aligned_result = AlignPointerDown(tail_ - size, alignment); - if (aligned_result < head_) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - const size_t missing_memory = head_ - aligned_result; - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to allocate tail memory. Requested: %u, " - "available %u, missing: %u", - size, size - missing_memory, missing_memory); -#endif - return nullptr; - } - tail_ = aligned_result; - return aligned_result; -} - -uint8_t* SimpleMemoryAllocator::AllocateTemp(size_t size, size_t alignment) { - uint8_t* const aligned_result = AlignPointerUp(temp_, alignment); - const size_t available_memory = tail_ - aligned_result; - if (available_memory < size) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to allocate temp memory. Requested: %u, " - "available %u, missing: %u", - size, available_memory, size - available_memory); - return nullptr; - } - temp_ = aligned_result + size; - return aligned_result; -} - -void SimpleMemoryAllocator::ResetTempAllocations() { temp_ = head_; } - -uint8_t* SimpleMemoryAllocator::GetHeadBuffer() const { return buffer_head_; } - -size_t SimpleMemoryAllocator::GetHeadUsedBytes() const { - return head_ - buffer_head_; -} - -size_t SimpleMemoryAllocator::GetTailUsedBytes() const { - return buffer_tail_ - tail_; -} - -size_t SimpleMemoryAllocator::GetAvailableMemory(size_t alignment) const { - uint8_t* const aligned_temp = AlignPointerUp(temp_, alignment); - uint8_t* const aligned_tail = AlignPointerDown(tail_, alignment); - return aligned_tail - aligned_temp; -} - -size_t SimpleMemoryAllocator::GetUsedBytes() const { - return GetBufferSize() - (tail_ - temp_); -} - -size_t SimpleMemoryAllocator::GetBufferSize() const { - return buffer_tail_ - buffer_head_; -} - -uint8_t* SimpleMemoryAllocator::head() const { return head_; } - -uint8_t* SimpleMemoryAllocator::tail() const { return tail_; } - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/simple_memory_allocator.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/simple_memory_allocator.h deleted file mode 100644 index 35adaf1a..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/simple_memory_allocator.h +++ /dev/null @@ -1,112 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ -#define TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ - -#include -#include - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/micro/compatibility.h" - -namespace tflite { - -// TODO(petewarden): This allocator never frees up or reuses any memory, even -// though we have enough information about lifetimes of the tensors to do so. -// This makes it pretty wasteful, so we should use a more intelligent method. -class SimpleMemoryAllocator { - public: - // TODO(b/157615197): Cleanup constructors/destructor and use factory - // functions. - SimpleMemoryAllocator(ErrorReporter* error_reporter, uint8_t* buffer_head, - uint8_t* buffer_tail); - SimpleMemoryAllocator(ErrorReporter* error_reporter, uint8_t* buffer, - size_t buffer_size); - virtual ~SimpleMemoryAllocator(); - - // Creates a new SimpleMemoryAllocator from a given buffer head and size. - static SimpleMemoryAllocator* Create(ErrorReporter* error_reporter, - uint8_t* buffer_head, - size_t buffer_size); - - // Adjusts the head (lowest address and moving upwards) memory allocation to a - // given size. Calls to this method will also invalidate all temporary - // allocation values (it sets the location of temp space at the end of the - // head section). This call will fail if a chain of allocations through - // AllocateTemp() have not been cleaned up with a call to - // ResetTempAllocations(). - virtual TfLiteStatus SetHeadBufferSize(size_t size, size_t alignment); - - // Allocates memory starting at the tail of the arena (highest address and - // moving downwards). - virtual uint8_t* AllocateFromTail(size_t size, size_t alignment); - - // Allocates a temporary buffer from the head of the arena (lowest address and - // moving upwards) but does not update the actual head allocation size or - // position. The returned buffer is guaranteed until either - // ResetTempAllocations() is called or another call to AllocateFromHead(). - // Repeat calls to this function will create a chain of temp allocations. All - // calls to AllocateTemp() must end with a call to ResetTempAllocations(). If - // AllocateFromHead() is called before a call to ResetTempAllocations(), it - // will fail with an error message. - virtual uint8_t* AllocateTemp(size_t size, size_t alignment); - - // Resets a chain of temporary allocations back to the current head of the - // arena (lowest address). - virtual void ResetTempAllocations(); - - // Returns a pointer to the buffer currently assigned to the head section. - // This buffer is set by calling SetHeadSize(). - uint8_t* GetHeadBuffer() const; - - // Returns the size of the head section in bytes. - size_t GetHeadUsedBytes() const; - - // Returns the size of all allocations in the tail section in bytes. - size_t GetTailUsedBytes() const; - - // Returns the number of bytes available with a given alignment. This number - // takes in account any temporary allocations. 
- size_t GetAvailableMemory(size_t alignment) const; - - // Returns the number of used bytes in the allocator. This number takes in - // account any temporary allocations. - size_t GetUsedBytes() const; - - protected: - // Returns a pointer to the current end of the head buffer. - uint8_t* head() const; - - // Returns a pointer to the current end of the tail buffer. - uint8_t* tail() const; - - private: - size_t GetBufferSize() const; - - ErrorReporter* error_reporter_; - uint8_t* buffer_head_; - uint8_t* buffer_tail_; - uint8_t* head_; - uint8_t* tail_; - uint8_t* temp_; - - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/test_helpers.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/test_helpers.cc deleted file mode 100644 index f73073f6..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/test_helpers.cc +++ /dev/null @@ -1,1079 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/lite/micro/test_helpers.h" - -#include -#include -#include -#include -#include - -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/core/api/error_reporter.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/kernels/kernel_util.h" -#include "tensorflow/lite/micro/all_ops_resolver.h" -#include "tensorflow/lite/micro/micro_utils.h" -#include "tensorflow/lite/schema/schema_generated.h" - -// TODO(b/170464050): Use TFLM test only version of schema_utils. - -namespace tflite { -namespace testing { -namespace { - -class StackAllocator : public flatbuffers::Allocator { - public: - StackAllocator() : data_(data_backing_), data_size_(0) {} - - uint8_t* allocate(size_t size) override { - TFLITE_DCHECK((data_size_ + size) <= kStackAllocatorSize); - uint8_t* result = data_; - data_ += size; - data_size_ += size; - return result; - } - - void deallocate(uint8_t* p, size_t) override {} - - static StackAllocator& instance() { - // Avoid using true dynamic memory allocation to be portable to bare metal. 
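// (Illustrative note: the singleton below is built with placement new into a
// static char buffer, i.e. `new (inst_memory) StackAllocator`, so the object
// lives in preallocated storage rather than on the heap.)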
- static char inst_memory[sizeof(StackAllocator)]; - static StackAllocator* inst = new (inst_memory) StackAllocator; - return *inst; - } - - static constexpr size_t kStackAllocatorSize = 8192; - - private: - uint8_t data_backing_[kStackAllocatorSize]; - uint8_t* data_; - int data_size_; -}; - -flatbuffers::FlatBufferBuilder* BuilderInstance() { - static char inst_memory[sizeof(flatbuffers::FlatBufferBuilder)]; - static flatbuffers::FlatBufferBuilder* inst = - new (inst_memory) flatbuffers::FlatBufferBuilder( - StackAllocator::kStackAllocatorSize, &StackAllocator::instance()); - return inst; -} - -// A wrapper around FlatBuffer API to help build model easily. -class ModelBuilder { - public: - typedef int32_t Tensor; - typedef int Operator; - typedef int Node; - - // `builder` needs to be available until BuildModel is called. - explicit ModelBuilder(flatbuffers::FlatBufferBuilder* builder) - : builder_(builder) {} - - // Registers an operator that will be used in the model. - Operator RegisterOp(BuiltinOperator op, const char* custom_code); - - // Adds a tensor to the model. - Tensor AddTensor(TensorType type, std::initializer_list shape) { - return AddTensorImpl(type, /* is_variable */ false, shape); - } - - // Adds a variable tensor to the model. - Tensor AddVariableTensor(TensorType type, - std::initializer_list shape) { - return AddTensorImpl(type, /* is_variable */ true, shape); - } - - // Adds a node to the model with given input and output Tensors. - Node AddNode(Operator op, std::initializer_list inputs, - std::initializer_list outputs); - - void AddMetadata(const char* description_string, - const int32_t* metadata_buffer_data, size_t num_elements); - - // Constructs the flatbuffer model using `builder_` and return a pointer to - // it. The returned model has the same lifetime as `builder_`. - // Note the default value of 0 for num_subgraph_inputs means all tensor inputs - // are in subgraph input list. - const Model* BuildModel(std::initializer_list inputs, - std::initializer_list outputs, - size_t num_subgraph_inputs = 0); - - private: - // Adds a tensor to the model. 
- Tensor AddTensorImpl(TensorType type, bool is_variable, - std::initializer_list shape); - - flatbuffers::FlatBufferBuilder* builder_; - - static constexpr int kMaxOperatorCodes = 10; - flatbuffers::Offset operator_codes_[kMaxOperatorCodes]; - int next_operator_code_id_ = 0; - - static constexpr int kMaxOperators = 50; - flatbuffers::Offset operators_[kMaxOperators]; - int next_operator_id_ = 0; - - static constexpr int kMaxTensors = 50; - flatbuffers::Offset tensors_[kMaxTensors]; - - static constexpr int kMaxMetadataBuffers = 10; - - static constexpr int kMaxMetadatas = 10; - flatbuffers::Offset metadata_[kMaxMetadatas]; - - flatbuffers::Offset metadata_buffers_[kMaxMetadataBuffers]; - - int nbr_of_metadata_buffers_ = 0; - - int next_tensor_id_ = 0; -}; - -ModelBuilder::Operator ModelBuilder::RegisterOp(BuiltinOperator op, - const char* custom_code) { - TFLITE_DCHECK(next_operator_code_id_ <= kMaxOperatorCodes); - operator_codes_[next_operator_code_id_] = tflite::CreateOperatorCodeDirect( - *builder_, /*deprecated_builtin_code=*/0, custom_code, /*version=*/0, op); - next_operator_code_id_++; - return next_operator_code_id_ - 1; -} - -ModelBuilder::Node ModelBuilder::AddNode( - ModelBuilder::Operator op, - std::initializer_list inputs, - std::initializer_list outputs) { - TFLITE_DCHECK(next_operator_id_ <= kMaxOperators); - operators_[next_operator_id_] = tflite::CreateOperator( - *builder_, op, builder_->CreateVector(inputs.begin(), inputs.size()), - builder_->CreateVector(outputs.begin(), outputs.size()), - BuiltinOptions_NONE); - next_operator_id_++; - return next_operator_id_ - 1; -} - -void ModelBuilder::AddMetadata(const char* description_string, - const int32_t* metadata_buffer_data, - size_t num_elements) { - metadata_[ModelBuilder::nbr_of_metadata_buffers_] = - CreateMetadata(*builder_, builder_->CreateString(description_string), - 1 + ModelBuilder::nbr_of_metadata_buffers_); - - metadata_buffers_[nbr_of_metadata_buffers_] = tflite::CreateBuffer( - *builder_, builder_->CreateVector((uint8_t*)metadata_buffer_data, - sizeof(uint32_t) * num_elements)); - - ModelBuilder::nbr_of_metadata_buffers_++; -} - -const Model* ModelBuilder::BuildModel( - std::initializer_list inputs, - std::initializer_list outputs, - size_t num_subgraph_inputs) { - // Model schema requires an empty buffer at idx 0. - size_t buffer_size = 1 + ModelBuilder::nbr_of_metadata_buffers_; - flatbuffers::Offset buffers[kMaxMetadataBuffers]; - buffers[0] = tflite::CreateBuffer(*builder_); - - // Place the metadata buffers first in the buffer since the indices for them - // have already been set in AddMetadata() - for (int i = 1; i < ModelBuilder::nbr_of_metadata_buffers_ + 1; ++i) { - buffers[i] = metadata_buffers_[i - 1]; - } - - // TFLM only supports single subgraph. - constexpr size_t subgraphs_size = 1; - - // Find out number of subgraph inputs. - if (num_subgraph_inputs == 0) { - // This is the default case. - num_subgraph_inputs = inputs.size(); - } else { - // A non-zero value of num_subgraph_inputs means that some of - // the operator input tensors are not subgraph inputs. 
- TFLITE_DCHECK(num_subgraph_inputs <= inputs.size()); - } - - const flatbuffers::Offset subgraphs[subgraphs_size] = { - tflite::CreateSubGraph( - *builder_, builder_->CreateVector(tensors_, next_tensor_id_), - builder_->CreateVector(inputs.begin(), num_subgraph_inputs), - builder_->CreateVector(outputs.begin(), outputs.size()), - builder_->CreateVector(operators_, next_operator_id_), - builder_->CreateString("test_subgraph"))}; - - flatbuffers::Offset model_offset; - if (ModelBuilder::nbr_of_metadata_buffers_ > 0) { - model_offset = tflite::CreateModel( - *builder_, 0, - builder_->CreateVector(operator_codes_, next_operator_code_id_), - builder_->CreateVector(subgraphs, subgraphs_size), - builder_->CreateString("teset_model"), - builder_->CreateVector(buffers, buffer_size), 0, - builder_->CreateVector(metadata_, - ModelBuilder::nbr_of_metadata_buffers_)); - } else { - model_offset = tflite::CreateModel( - *builder_, 0, - builder_->CreateVector(operator_codes_, next_operator_code_id_), - builder_->CreateVector(subgraphs, subgraphs_size), - builder_->CreateString("teset_model"), - builder_->CreateVector(buffers, buffer_size)); - } - - tflite::FinishModelBuffer(*builder_, model_offset); - void* model_pointer = builder_->GetBufferPointer(); - const Model* model = flatbuffers::GetRoot(model_pointer); - return model; -} - -ModelBuilder::Tensor ModelBuilder::AddTensorImpl( - TensorType type, bool is_variable, std::initializer_list shape) { - TFLITE_DCHECK(next_tensor_id_ <= kMaxTensors); - tensors_[next_tensor_id_] = tflite::CreateTensor( - *builder_, builder_->CreateVector(shape.begin(), shape.size()), type, - /* buffer */ 0, /* name */ 0, /* quantization */ 0, - /* is_variable */ is_variable, - /* sparsity */ 0); - next_tensor_id_++; - return next_tensor_id_ - 1; -} - -const Model* BuildSimpleStatefulModel() { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance(); - - ModelBuilder model_builder(fb_builder); - - const int op_id = - model_builder.RegisterOp(BuiltinOperator_CUSTOM, "simple_stateful_op"); - const int input_tensor = model_builder.AddTensor(TensorType_UINT8, {3}); - const int median_tensor = model_builder.AddTensor(TensorType_UINT8, {3}); - const int invoke_count_tensor = - model_builder.AddTensor(TensorType_INT32, {1}); - - model_builder.AddNode(op_id, {input_tensor}, - {median_tensor, invoke_count_tensor}); - return model_builder.BuildModel({input_tensor}, - {median_tensor, invoke_count_tensor}); -} - -const Model* BuildSimpleModelWithBranch() { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance(); - - ModelBuilder model_builder(fb_builder); - /* Model structure - | t0 - +------| - | v - | +---------+ - | | n0 | - | | | - | +---------+ - v + - | - +---------+ | t1 - | n1 | | - | | | - +---------+ | - | | - t2 | v - | +---------+ - +-->| n2 | - | | - +-------|-+ - |t3 - v - */ - const int op_id = - model_builder.RegisterOp(BuiltinOperator_CUSTOM, "mock_custom"); - const int t0 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); - const int t1 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); - const int t2 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); - const int t3 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); - model_builder.AddNode(op_id, {t0}, {t1}); // n0 - model_builder.AddNode(op_id, {t0}, {t2}); // n1 - model_builder.AddNode(op_id, {t1, t2}, {t3}); // n2 - return model_builder.BuildModel({t0}, {t3}); -} - -const Model* BuildModelWithOfflinePlanning(int 
number_of_tensors, - const int32_t* metadata_buffer, - NodeConnection* node_conn, - int num_conns, - int num_subgraph_inputs) { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance(); - - ModelBuilder model_builder(fb_builder); - - const int op_id = - model_builder.RegisterOp(BuiltinOperator_CUSTOM, "mock_custom"); - - for (int i = 0; i < number_of_tensors; ++i) { - model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); - } - - for (int i = 0; i < num_conns; ++i) { - model_builder.AddNode(op_id, node_conn[i].input, node_conn[i].output); - } - - model_builder.AddMetadata( - "OfflineMemoryAllocation", metadata_buffer, - number_of_tensors + tflite::testing::kOfflinePlannerHeaderSize); - - return model_builder.BuildModel( - node_conn[0].input, node_conn[num_conns - 1].output, num_subgraph_inputs); -} - -const Model* BuildSimpleMockModel() { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); - - constexpr size_t buffer_data_size = 1; - const uint8_t buffer_data[buffer_data_size] = {21}; - constexpr size_t buffers_size = 2; - const Offset buffers[buffers_size] = { - CreateBuffer(*builder), - CreateBuffer(*builder, - builder->CreateVector(buffer_data, buffer_data_size))}; - constexpr size_t tensor_shape_size = 1; - const int32_t tensor_shape[tensor_shape_size] = {1}; - constexpr size_t tensors_size = 4; - const Offset tensors[tensors_size] = { - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, - builder->CreateString("test_input_tensor"), 0, false), - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_UINT8, 1, - builder->CreateString("test_weight_tensor"), 0, false), - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, - builder->CreateString("test_output_tensor"), 0, false), - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, - builder->CreateString("test_output2_tensor"), 0, false), - }; - constexpr size_t inputs_size = 1; - const int32_t inputs[inputs_size] = {0}; - constexpr size_t outputs_size = 2; - const int32_t outputs[outputs_size] = {2, 3}; - constexpr size_t operator_inputs_size = 2; - const int32_t operator_inputs[operator_inputs_size] = {0, 1}; - constexpr size_t operator_outputs_size = 1; - const int32_t operator_outputs[operator_outputs_size] = {2}; - const int32_t operator2_outputs[operator_outputs_size] = {3}; - constexpr size_t operators_size = 2; - const Offset operators[operators_size] = { - CreateOperator( - *builder, 0, - builder->CreateVector(operator_inputs, operator_inputs_size), - builder->CreateVector(operator_outputs, operator_outputs_size), - BuiltinOptions_NONE), - CreateOperator( - *builder, 0, - builder->CreateVector(operator_inputs, operator_inputs_size), - builder->CreateVector(operator2_outputs, operator_outputs_size), - BuiltinOptions_NONE), - }; - constexpr size_t subgraphs_size = 1; - const Offset subgraphs[subgraphs_size] = { - CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), - builder->CreateVector(inputs, inputs_size), - builder->CreateVector(outputs, outputs_size), - builder->CreateVector(operators, operators_size), - builder->CreateString("test_subgraph"))}; - constexpr size_t operator_codes_size = 1; - const Offset operator_codes[operator_codes_size] = { - CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, - "mock_custom", - 
/*version=*/0, BuiltinOperator_CUSTOM)}; - const Offset model_offset = CreateModel( - *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), - builder->CreateVector(subgraphs, subgraphs_size), - builder->CreateString("test_model"), - builder->CreateVector(buffers, buffers_size)); - FinishModelBuffer(*builder, model_offset); - void* model_pointer = builder->GetBufferPointer(); - const Model* model = flatbuffers::GetRoot(model_pointer); - return model; -} - -const Model* BuildComplexMockModel() { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); - - constexpr size_t buffer_data_size = 1; - const uint8_t buffer_data_1[buffer_data_size] = {21}; - const uint8_t buffer_data_2[buffer_data_size] = {21}; - const uint8_t buffer_data_3[buffer_data_size] = {21}; - constexpr size_t buffers_size = 7; - const Offset buffers[buffers_size] = { - // Op 1 buffers: - CreateBuffer(*builder), - CreateBuffer(*builder), - CreateBuffer(*builder, - builder->CreateVector(buffer_data_1, buffer_data_size)), - // Op 2 buffers: - CreateBuffer(*builder), - CreateBuffer(*builder, - builder->CreateVector(buffer_data_2, buffer_data_size)), - // Op 3 buffers: - CreateBuffer(*builder), - CreateBuffer(*builder, - builder->CreateVector(buffer_data_3, buffer_data_size)), - }; - constexpr size_t tensor_shape_size = 1; - const int32_t tensor_shape[tensor_shape_size] = {1}; - - constexpr size_t tensors_size = 10; - const Offset tensors[tensors_size] = { - // Op 1 inputs: - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, builder->CreateString("test_input_tensor_1"), 0, - false /* is_variable */), - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 1, builder->CreateString("test_variable_tensor_1"), - 0, true /* is_variable */), - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_1"), 0, - false /* is_variable */), - // Op 1 output / Op 2 input: - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, builder->CreateString("test_output_tensor_1"), 0, - false /* is_variable */), - // Op 2 inputs: - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 1, builder->CreateString("test_variable_tensor_2"), - 0, true /* is_variable */), - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_2"), 0, - false /* is_variable */), - // Op 2 output / Op 3 input: - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, builder->CreateString("test_output_tensor_2"), 0, - false /* is_variable */), - // Op 3 inputs: - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 1, builder->CreateString("test_variable_tensor_3"), - 0, true /* is_variable */), - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_3"), 0, - false /* is_variable */), - // Op 3 output: - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, builder->CreateString("test_output_tensor_3"), 0, - false /* is_variable */), - }; - - constexpr size_t operators_size = 3; - Offset 
operators[operators_size]; - { - // Set Op 1 attributes: - constexpr size_t operator_inputs_size = 3; - const int32_t operator_inputs[operator_inputs_size] = {0, 1, 2}; - constexpr size_t operator_outputs_size = 1; - const int32_t operator_outputs[operator_outputs_size] = {3}; - - operators[0] = {CreateOperator( - *builder, 0, - builder->CreateVector(operator_inputs, operator_inputs_size), - builder->CreateVector(operator_outputs, operator_outputs_size), - BuiltinOptions_NONE)}; - } - - { - // Set Op 2 attributes - constexpr size_t operator_inputs_size = 3; - const int32_t operator_inputs[operator_inputs_size] = {3, 4, 5}; - constexpr size_t operator_outputs_size = 1; - const int32_t operator_outputs[operator_outputs_size] = {6}; - - operators[1] = {CreateOperator( - *builder, 0, - builder->CreateVector(operator_inputs, operator_inputs_size), - builder->CreateVector(operator_outputs, operator_outputs_size), - BuiltinOptions_NONE)}; - } - - { - // Set Op 3 attributes - constexpr size_t operator_inputs_size = 3; - const int32_t operator_inputs[operator_inputs_size] = {6, 7, 8}; - constexpr size_t operator_outputs_size = 1; - const int32_t operator_outputs[operator_outputs_size] = {9}; - - operators[2] = {CreateOperator( - *builder, 0, - builder->CreateVector(operator_inputs, operator_inputs_size), - builder->CreateVector(operator_outputs, operator_outputs_size), - BuiltinOptions_NONE)}; - } - - constexpr size_t inputs_size = 1; - const int32_t inputs[inputs_size] = {0}; - constexpr size_t outputs_size = 1; - const int32_t outputs[outputs_size] = {9}; - - constexpr size_t subgraphs_size = 1; - const Offset subgraphs[subgraphs_size] = { - CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), - builder->CreateVector(inputs, inputs_size), - builder->CreateVector(outputs, outputs_size), - builder->CreateVector(operators, operators_size), - builder->CreateString("test_subgraph"))}; - - constexpr size_t operator_codes_size = 1; - const Offset operator_codes[operator_codes_size] = { - CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, - "mock_custom", - /*version=*/0, BuiltinOperator_CUSTOM)}; - - const Offset model_offset = CreateModel( - *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), - builder->CreateVector(subgraphs, subgraphs_size), - builder->CreateString("test_model"), - builder->CreateVector(buffers, buffers_size)); - - FinishModelBuffer(*builder, model_offset); - void* model_pointer = builder->GetBufferPointer(); - const Model* model = flatbuffers::GetRoot(model_pointer); - return model; -} - -const Model* BuildSimpleMultipleInputsModel() { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); - - constexpr size_t buffers_size = 1; - const Offset buffers[buffers_size] = { - CreateBuffer(*builder), - }; - constexpr size_t tensor_shape_size = 1; - const int32_t tensor_shape[tensor_shape_size] = {1}; - constexpr size_t tensors_size = 4; - const Offset tensors[tensors_size] = { - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, - builder->CreateString("test_input_tensor1"), 0, false), - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT8, 0, - builder->CreateString("test_input_tensor2"), 0, false), - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, - builder->CreateString("test_input_tensor3"), 0, false), - CreateTensor(*builder, - 
builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, - builder->CreateString("test_output_tensor"), 0, false), - }; - constexpr size_t inputs_size = 3; - const int32_t inputs[inputs_size] = {0, 1, 2}; - constexpr size_t outputs_size = 1; - const int32_t outputs[outputs_size] = {3}; - constexpr size_t operator_inputs_size = 3; - const int32_t operator_inputs[operator_inputs_size] = {0, 1, 2}; - constexpr size_t operator_outputs_size = 1; - const int32_t operator_outputs[operator_outputs_size] = {3}; - constexpr size_t operators_size = 1; - const Offset operators[operators_size] = { - CreateOperator( - *builder, 0, - builder->CreateVector(operator_inputs, operator_inputs_size), - builder->CreateVector(operator_outputs, operator_outputs_size), - BuiltinOptions_NONE), - }; - constexpr size_t subgraphs_size = 1; - const Offset subgraphs[subgraphs_size] = { - CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), - builder->CreateVector(inputs, inputs_size), - builder->CreateVector(outputs, outputs_size), - builder->CreateVector(operators, operators_size), - builder->CreateString("test_subgraph"))}; - constexpr size_t operator_codes_size = 1; - const Offset operator_codes[operator_codes_size] = { - CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, - "multiple_inputs_op", - /*version=*/0, BuiltinOperator_CUSTOM)}; - const Offset model_offset = CreateModel( - *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), - builder->CreateVector(subgraphs, subgraphs_size), - builder->CreateString("test_model"), - builder->CreateVector(buffers, buffers_size)); - FinishModelBuffer(*builder, model_offset); - void* model_pointer = builder->GetBufferPointer(); - const Model* model = flatbuffers::GetRoot(model_pointer); - return model; -} - -} // namespace - -const TfLiteRegistration* SimpleStatefulOp::getRegistration() { - return GetMutableRegistration(); -} - -TfLiteRegistration* SimpleStatefulOp::GetMutableRegistration() { - static TfLiteRegistration r; - r.init = Init; - r.prepare = Prepare; - r.invoke = Invoke; - return &r; -} - -void* SimpleStatefulOp::Init(TfLiteContext* context, const char* buffer, - size_t length) { - TFLITE_DCHECK(context->AllocateBufferForEval == nullptr); - TFLITE_DCHECK(context->GetScratchBuffer == nullptr); - TFLITE_DCHECK(context->RequestScratchBufferInArena == nullptr); - - void* raw = context->AllocatePersistentBuffer(context, sizeof(OpData)); - OpData* data = reinterpret_cast(raw); - *data = {}; - return raw; -} - -TfLiteStatus SimpleStatefulOp::Prepare(TfLiteContext* context, - TfLiteNode* node) { - OpData* data = reinterpret_cast(node->user_data); - - // Make sure that the input is in uint8_t with at least 1 data entry. - const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); - if (input->type != kTfLiteUInt8) return kTfLiteError; - if (NumElements(input->dims) == 0) return kTfLiteError; - - // Allocate a temporary buffer with the same size of input for sorting. - TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( - context, sizeof(uint8_t) * NumElements(input->dims), - &data->sorting_buffer)); - // We can interleave scratch / persistent buffer allocation. 
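// (Illustrative note: persistent buffers are carved from the arena tail by
// AllocatePersistentBuffer, while the scratch request above is only recorded at
// this point and presumably materialized in the head section later, so the two
// kinds of allocation can safely interleave.)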
- data->invoke_count = reinterpret_cast( - context->AllocatePersistentBuffer(context, sizeof(int))); - *data->invoke_count = 0; - - return kTfLiteOk; -} - -TfLiteStatus SimpleStatefulOp::Invoke(TfLiteContext* context, - TfLiteNode* node) { - OpData* data = reinterpret_cast(node->user_data); - *data->invoke_count += 1; - - const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); - const uint8_t* input_data = GetTensorData(input); - int size = NumElements(input->dims); - - uint8_t* sorting_buffer = reinterpret_cast( - context->GetScratchBuffer(context, data->sorting_buffer)); - // Copy inputs data to the sorting buffer. We don't want to mutate the input - // tensor as it might be used by a another node. - for (int i = 0; i < size; i++) { - sorting_buffer[i] = input_data[i]; - } - - // In place insertion sort on `sorting_buffer`. - for (int i = 1; i < size; i++) { - for (int j = i; j > 0 && sorting_buffer[j] < sorting_buffer[j - 1]; j--) { - std::swap(sorting_buffer[j], sorting_buffer[j - 1]); - } - } - - TfLiteTensor* median; - TF_LITE_ENSURE_OK(context, - GetOutputSafe(context, node, kMedianTensor, &median)); - uint8_t* median_data = GetTensorData(median); - TfLiteTensor* invoke_count; - TF_LITE_ENSURE_OK(context, - GetOutputSafe(context, node, kInvokeCount, &invoke_count)); - int32_t* invoke_count_data = GetTensorData(invoke_count); - - median_data[0] = sorting_buffer[size / 2]; - invoke_count_data[0] = *data->invoke_count; - return kTfLiteOk; -} - -const TfLiteRegistration* MockCustom::getRegistration() { - return GetMutableRegistration(); -} - -TfLiteRegistration* MockCustom::GetMutableRegistration() { - static TfLiteRegistration r; - r.init = Init; - r.prepare = Prepare; - r.invoke = Invoke; - r.free = Free; - return &r; -} - -void* MockCustom::Init(TfLiteContext* context, const char* buffer, - size_t length) { - // We don't support delegate in TFL micro. This is a weak check to test if - // context struct being zero-initialized. - TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr); - freed_ = false; - // Do nothing. - return nullptr; -} - -void MockCustom::Free(TfLiteContext* context, void* buffer) { freed_ = true; } - -TfLiteStatus MockCustom::Prepare(TfLiteContext* context, TfLiteNode* node) { - return kTfLiteOk; -} - -TfLiteStatus MockCustom::Invoke(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); - const int32_t* input_data = input->data.i32; - const TfLiteTensor* weight; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &weight)); - const uint8_t* weight_data = weight->data.uint8; - TfLiteTensor* output; - TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); - int32_t* output_data = output->data.i32; - output_data[0] = - 0; // Catch output tensor sharing memory with an input tensor - output_data[0] = input_data[0] + weight_data[0]; - return kTfLiteOk; -} - -bool MockCustom::freed_ = false; - -const TfLiteRegistration* MultipleInputs::getRegistration() { - return GetMutableRegistration(); -} - -TfLiteRegistration* MultipleInputs::GetMutableRegistration() { - static TfLiteRegistration r; - r.init = Init; - r.prepare = Prepare; - r.invoke = Invoke; - r.free = Free; - return &r; -} - -void* MultipleInputs::Init(TfLiteContext* context, const char* buffer, - size_t length) { - // We don't support delegate in TFL micro. This is a weak check to test if - // context struct being zero-initialized. 
- TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr); - freed_ = false; - // Do nothing. - return nullptr; -} - -void MultipleInputs::Free(TfLiteContext* context, void* buffer) { - freed_ = true; -} - -TfLiteStatus MultipleInputs::Prepare(TfLiteContext* context, TfLiteNode* node) { - return kTfLiteOk; -} - -TfLiteStatus MultipleInputs::Invoke(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); - const int32_t* input_data = input->data.i32; - const TfLiteTensor* input1; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input1)); - const int32_t* input_data1 = input1->data.i32; - const TfLiteTensor* input2; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &input2)); - const int32_t* input_data2 = input2->data.i32; - - TfLiteTensor* output; - TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); - int32_t* output_data = output->data.i32; - output_data[0] = - 0; // Catch output tensor sharing memory with an input tensor - output_data[0] = input_data[0] + input_data1[0] + input_data2[0]; - return kTfLiteOk; -} - -bool MultipleInputs::freed_ = false; - -AllOpsResolver GetOpResolver() { - AllOpsResolver op_resolver; - op_resolver.AddCustom("mock_custom", MockCustom::GetMutableRegistration()); - op_resolver.AddCustom("simple_stateful_op", - SimpleStatefulOp::GetMutableRegistration()); - op_resolver.AddCustom("multiple_inputs_op", - MultipleInputs::GetMutableRegistration()); - return op_resolver; -} - -const Model* GetSimpleMockModel() { - static Model* model = nullptr; - if (!model) { - model = const_cast(BuildSimpleMockModel()); - } - return model; -} - -const Model* GetSimpleMultipleInputsModel() { - static Model* model = nullptr; - if (!model) { - model = const_cast(BuildSimpleMultipleInputsModel()); - } - return model; -} - -const Model* GetComplexMockModel() { - static Model* model = nullptr; - if (!model) { - model = const_cast(BuildComplexMockModel()); - } - return model; -} - -const Model* GetSimpleModelWithBranch() { - static Model* model = nullptr; - if (!model) { - model = const_cast(BuildSimpleModelWithBranch()); - } - return model; -} - -const Model* GetModelWithOfflinePlanning(int num_tensors, - const int32_t* metadata_buffer, - NodeConnection* node_conn, - int num_conns, - int num_subgraph_inputs) { - const Model* model = BuildModelWithOfflinePlanning( - num_tensors, metadata_buffer, node_conn, num_conns, num_subgraph_inputs); - return model; -} - -const Model* GetSimpleStatefulModel() { - static Model* model = nullptr; - if (!model) { - model = const_cast(BuildSimpleStatefulModel()); - } - return model; -} - -const Tensor* Create1dFlatbufferTensor(int size, bool is_variable) { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); - constexpr size_t tensor_shape_size = 1; - const int32_t tensor_shape[tensor_shape_size] = {size}; - const Offset tensor_offset = CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, builder->CreateString("test_tensor"), 0, - is_variable); - builder->Finish(tensor_offset); - void* tensor_pointer = builder->GetBufferPointer(); - const Tensor* tensor = flatbuffers::GetRoot(tensor_pointer); - return tensor; -} - -const Tensor* CreateQuantizedFlatbufferTensor(int size) { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); - const Offset quant_params = - CreateQuantizationParameters( 
- *builder, - /*min=*/builder->CreateVector({0.1f}), - /*max=*/builder->CreateVector({0.2f}), - /*scale=*/builder->CreateVector({0.3f}), - /*zero_point=*/builder->CreateVector({100ll})); - - constexpr size_t tensor_shape_size = 1; - const int32_t tensor_shape[tensor_shape_size] = {size}; - const Offset tensor_offset = CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, builder->CreateString("test_tensor"), quant_params, - false); - builder->Finish(tensor_offset); - void* tensor_pointer = builder->GetBufferPointer(); - const Tensor* tensor = flatbuffers::GetRoot(tensor_pointer); - return tensor; -} - -const Tensor* CreateMissingQuantizationFlatbufferTensor(int size) { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); - const Offset quant_params = - CreateQuantizationParameters(*builder, 0, 0, 0, 0, - QuantizationDetails_NONE, 0, 0); - constexpr size_t tensor_shape_size = 1; - const int32_t tensor_shape[tensor_shape_size] = {size}; - const Offset tensor_offset = CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, builder->CreateString("test_tensor"), quant_params, - false); - builder->Finish(tensor_offset); - void* tensor_pointer = builder->GetBufferPointer(); - const Tensor* tensor = flatbuffers::GetRoot(tensor_pointer); - return tensor; -} - -const flatbuffers::Vector>* -CreateFlatbufferBuffers() { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); - constexpr size_t buffers_size = 1; - const Offset buffers[buffers_size] = { - CreateBuffer(*builder), - }; - const flatbuffers::Offset>> - buffers_offset = builder->CreateVector(buffers, buffers_size); - builder->Finish(buffers_offset); - void* buffers_pointer = builder->GetBufferPointer(); - const flatbuffers::Vector>* result = - flatbuffers::GetRoot>>( - buffers_pointer); - return result; -} - -int TestStrcmp(const char* a, const char* b) { - if ((a == nullptr) || (b == nullptr)) { - return -1; - } - while ((*a != 0) && (*a == *b)) { - a++; - b++; - } - return *reinterpret_cast(a) - - *reinterpret_cast(b); -} - -// Wrapper to forward kernel errors to the interpreter's error reporter. -void ReportOpError(struct TfLiteContext* context, const char* format, ...) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - ErrorReporter* error_reporter = static_cast(context->impl_); - va_list args; - va_start(args, format); - TF_LITE_REPORT_ERROR(error_reporter, format, args); - va_end(args); -#endif -} - -// Create a TfLiteIntArray from an array of ints. The first element in the -// supplied array must be the size of the array expressed as an int. -TfLiteIntArray* IntArrayFromInts(const int* int_array) { - return const_cast( - reinterpret_cast(int_array)); -} - -// Create a TfLiteFloatArray from an array of floats. The first element in the -// supplied array must be the size of the array expressed as a float. 
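// Example (illustrative): given `float scales[] = {2, 0.5f, 0.25f};`, the call
// FloatArrayFromFloats(scales) overwrites the leading element with the integer
// size and reinterprets the buffer as a TfLiteFloatArray of size 2 holding
// {0.5f, 0.25f}.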
-TfLiteFloatArray* FloatArrayFromFloats(const float* floats) { - static_assert(sizeof(float) == sizeof(int), - "assumes sizeof(float) == sizeof(int) to perform casting"); - int size = static_cast(floats[0]); - *reinterpret_cast(const_cast(floats)) = size; - return reinterpret_cast(const_cast(floats)); -} - -TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized, - TfLiteIntArray* dims, float input_scale, - float weights_scale, bool is_variable) { - float bias_scale = input_scale * weights_scale; - tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale); - - // Quantized int32_t tensors always have a zero point of 0, since the range of - // int32_t values is large, and because zero point costs extra cycles during - // processing. - TfLiteTensor result = - CreateQuantizedTensor(quantized, dims, bias_scale, 0, is_variable); - return result; -} - -// Quantizes int32_t bias tensor with per-channel weights determined by input -// scale multiplied by weight scale for each channel. -TfLiteTensor CreatePerChannelQuantizedBiasTensor( - const float* input, int32_t* quantized, TfLiteIntArray* dims, - float input_scale, float* weight_scales, float* scales, int* zero_points, - TfLiteAffineQuantization* affine_quant, int quantized_dimension, - bool is_variable) { - int input_size = ElementCount(*dims); - int num_channels = dims->data[quantized_dimension]; - // First element is reserved for array length - zero_points[0] = num_channels; - scales[0] = static_cast(num_channels); - float* scales_array = &scales[1]; - for (int i = 0; i < num_channels; i++) { - scales_array[i] = input_scale * weight_scales[i]; - zero_points[i + 1] = 0; - } - - SymmetricPerChannelQuantize(input, quantized, input_size, - num_channels, scales_array); - - affine_quant->scale = FloatArrayFromFloats(scales); - affine_quant->zero_point = IntArrayFromInts(zero_points); - affine_quant->quantized_dimension = quantized_dimension; - - TfLiteTensor result = CreateTensor(quantized, dims, is_variable); - result.quantization = {kTfLiteAffineQuantization, affine_quant}; - return result; -} - -TfLiteTensor CreateSymmetricPerChannelQuantizedTensor( - const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales, - int* zero_points, TfLiteAffineQuantization* affine_quant, - int quantized_dimension, bool is_variable) { - int channel_count = dims->data[quantized_dimension]; - scales[0] = static_cast(channel_count); - zero_points[0] = channel_count; - - SignedSymmetricPerChannelQuantize(input, dims, quantized_dimension, quantized, - &scales[1]); - - for (int i = 0; i < channel_count; i++) { - zero_points[i + 1] = 0; - } - - affine_quant->scale = FloatArrayFromFloats(scales); - affine_quant->zero_point = IntArrayFromInts(zero_points); - affine_quant->quantized_dimension = quantized_dimension; - - TfLiteTensor result = CreateTensor(quantized, dims, is_variable); - result.quantization = {kTfLiteAffineQuantization, affine_quant}; - return result; -} - -size_t GetModelTensorCount(const Model* model) { - auto* subgraphs = model->subgraphs(); - if (subgraphs) { - return (*subgraphs)[0]->tensors()->size(); - } - return 0; -} - -} // namespace testing -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/test_helpers.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/test_helpers.h deleted file mode 100644 index 4c8b7c20..00000000 --- 
a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/test_helpers.h +++ /dev/null @@ -1,241 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_ -#define TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_ - -// Useful functions for writing tests. - -#include -#include - -#include "flatbuffers/flatbuffers.h" // from @flatbuffers -#include "tensorflow/lite//kernels/internal/tensor_ctypes.h" -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/kernels/internal/compatibility.h" -#include "tensorflow/lite/micro/all_ops_resolver.h" -#include "tensorflow/lite/micro/micro_utils.h" -#include "tensorflow/lite/portable_type_to_tflitetype.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { -namespace testing { - -constexpr int kOfflinePlannerHeaderSize = 3; - -struct NodeConnection_ { - std::initializer_list input; - std::initializer_list output; -}; -typedef struct NodeConnection_ NodeConnection; - -// A simple operator that returns the median of the input with the number of -// times the kernel was invoked. The implementation below is deliberately -// complicated, just to demonstrate how kernel memory planning works. -class SimpleStatefulOp { - static constexpr int kBufferNotAllocated = 0; - // Inputs: - static constexpr int kInputTensor = 0; - // Outputs: - static constexpr int kMedianTensor = 0; - static constexpr int kInvokeCount = 1; - struct OpData { - int* invoke_count = nullptr; - int sorting_buffer = kBufferNotAllocated; - }; - - public: - static const TfLiteRegistration* getRegistration(); - static TfLiteRegistration* GetMutableRegistration(); - static void* Init(TfLiteContext* context, const char* buffer, size_t length); - static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node); - static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node); -}; - -class MockCustom { - public: - static const TfLiteRegistration* getRegistration(); - static TfLiteRegistration* GetMutableRegistration(); - static void* Init(TfLiteContext* context, const char* buffer, size_t length); - static void Free(TfLiteContext* context, void* buffer); - static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node); - static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node); - - static bool freed_; -}; - -// A simple operator with the purpose of testing multiple inputs. It returns -// the sum of the inputs. 
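// Illustrative wiring sketch (the names below come from this header and from
// GetOpResolver()): the resolver maps the custom op name to this class, and
// Invoke() writes the sum of the three inputs' first elements to the output:
//
//   tflite::AllOpsResolver resolver = tflite::testing::GetOpResolver();
//   // "multiple_inputs_op" -> MultipleInputs::GetMutableRegistration()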
-class MultipleInputs { - public: - static const TfLiteRegistration* getRegistration(); - static TfLiteRegistration* GetMutableRegistration(); - static void* Init(TfLiteContext* context, const char* buffer, size_t length); - static void Free(TfLiteContext* context, void* buffer); - static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node); - static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node); - - static bool freed_; -}; - -// Returns an Op Resolver that can be used in the testing code. -AllOpsResolver GetOpResolver(); - -// Returns a simple example flatbuffer TensorFlow Lite model. Contains 1 input, -// 1 layer of weights, 1 output Tensor, and 1 operator. -const Model* GetSimpleMockModel(); - -// Returns a flatbuffer TensorFlow Lite model with more inputs, variable -// tensors, and operators. -const Model* GetComplexMockModel(); - -// Returns a simple flatbuffer model with two branches. -const Model* GetSimpleModelWithBranch(); - -// Returns a simple example flatbuffer TensorFlow Lite model. Contains 3 inputs, -// 1 output Tensor, and 1 operator. -const Model* GetSimpleMultipleInputsModel(); - -// Returns a simple flatbuffer model with offline planned tensors -// @param[in] num_tensors Number of tensors in the model. -// @param[in] metadata_buffer Metadata for offline planner. -// @param[in] node_con List of connections, i.e. operators -// in the model. -// @param[in] num_conns Number of connections. -// @param[in] num_subgraph_inputs How many of the input tensors are in -// the subgraph inputs. The default value -// of 0 means all of the input tensors -// are in the subgraph input list. There -// must be at least 1 input tensor in the -// subgraph input list. -const Model* GetModelWithOfflinePlanning(int num_tensors, - const int32_t* metadata_buffer, - NodeConnection* node_conn, - int num_conns, - int num_subgraph_inputs = 0); - -// Returns a flatbuffer model with `simple_stateful_op` -const Model* GetSimpleStatefulModel(); - -// Builds a one-dimensional flatbuffer tensor of the given size. -const Tensor* Create1dFlatbufferTensor(int size, bool is_variable = false); - -// Builds a one-dimensional flatbuffer tensor of the given size with -// quantization metadata. -const Tensor* CreateQuantizedFlatbufferTensor(int size); - -// Creates a one-dimensional tensor with no quantization metadata. -const Tensor* CreateMissingQuantizationFlatbufferTensor(int size); - -// Creates a vector of flatbuffer buffers. -const flatbuffers::Vector>* -CreateFlatbufferBuffers(); - -// Performs a simple string comparison without requiring standard C library. -int TestStrcmp(const char* a, const char* b); - -// Wrapper to forward kernel errors to the interpreter's error reporter. -void ReportOpError(struct TfLiteContext* context, const char* format, ...); - -void PopulateContext(TfLiteTensor* tensors, int tensors_size, - TfLiteContext* context); - -// Create a TfLiteIntArray from an array of ints. The first element in the -// supplied array must be the size of the array expressed as an int. -TfLiteIntArray* IntArrayFromInts(const int* int_array); - -// Create a TfLiteFloatArray from an array of floats. The first element in the -// supplied array must be the size of the array expressed as a float. 
-TfLiteFloatArray* FloatArrayFromFloats(const float* floats); - -template -TfLiteTensor CreateTensor(const T* data, TfLiteIntArray* dims, - const bool is_variable = false) { - TfLiteTensor result; - result.dims = dims; - result.params = {}; - result.quantization = {kTfLiteNoQuantization, nullptr}; - result.is_variable = is_variable; - result.allocation_type = kTfLiteMemNone; - result.type = typeToTfLiteType(); - // Const cast is used to allow passing in const and non-const arrays within a - // single CreateTensor method. A Const array should be used for immutable - // input tensors and non-const array should be used for mutable and output - // tensors. - result.data.data = const_cast(data); - result.quantization = {kTfLiteAffineQuantization, nullptr}; - result.bytes = ElementCount(*dims) * sizeof(T); - return result; -} - -template -TfLiteTensor CreateQuantizedTensor(const T* data, TfLiteIntArray* dims, - const float scale, const int zero_point = 0, - const bool is_variable = false) { - TfLiteTensor result = CreateTensor(data, dims, is_variable); - result.params = {scale, zero_point}; - result.quantization = {kTfLiteAffineQuantization, nullptr}; - return result; -} - -template -TfLiteTensor CreateQuantizedTensor(const float* input, T* quantized, - TfLiteIntArray* dims, float scale, - int zero_point, bool is_variable = false) { - int input_size = ElementCount(*dims); - tflite::Quantize(input, quantized, input_size, scale, zero_point); - return CreateQuantizedTensor(quantized, dims, scale, zero_point, is_variable); -} - -TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized, - TfLiteIntArray* dims, float input_scale, - float weights_scale, - bool is_variable = false); - -// Quantizes int32_t bias tensor with per-channel weights determined by input -// scale multiplied by weight scale for each channel. -TfLiteTensor CreatePerChannelQuantizedBiasTensor( - const float* input, int32_t* quantized, TfLiteIntArray* dims, - float input_scale, float* weight_scales, float* scales, int* zero_points, - TfLiteAffineQuantization* affine_quant, int quantized_dimension, - bool is_variable = false); - -TfLiteTensor CreateSymmetricPerChannelQuantizedTensor( - const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales, - int* zero_points, TfLiteAffineQuantization* affine_quant, - int quantized_dimension, bool is_variable = false); - -// Returns the number of tensors in the default subgraph for a tflite::Model. -size_t GetModelTensorCount(const Model* model); - -// Derives the quantization scaling factor from a min and max range. -template -inline float ScaleFromMinMax(const float min, const float max) { - return (max - min) / - static_cast((std::numeric_limits::max() * 1.0) - - std::numeric_limits::min()); -} - -// Derives the quantization zero point from a min and max range. 
-template -inline int ZeroPointFromMinMax(const float min, const float max) { - return static_cast(std::numeric_limits::min()) + - static_cast(-min / ScaleFromMinMax(min, max) + 0.5f); -} - -} // namespace testing -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/testing/micro_test.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/testing/micro_test.h deleted file mode 100644 index d74d8f4f..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/testing/micro_test.h +++ /dev/null @@ -1,238 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// An ultra-lightweight testing framework designed for use with microcontroller -// applications. Its only dependency is on TensorFlow Lite's ErrorReporter -// interface, where log messages are output. This is designed to be usable even -// when no standard C or C++ libraries are available, and without any dynamic -// memory allocation or reliance on global constructors. -// -// To build a test, you use syntax similar to gunit, but with some extra -// decoration to create a hidden 'main' function containing each of the tests to -// be run. Your code should look something like: -// ---------------------------------------------------------------------------- -// #include "path/to/this/header" -// -// TF_LITE_MICRO_TESTS_BEGIN -// -// TF_LITE_MICRO_TEST(SomeTest) { -// TF_LITE_LOG_EXPECT_EQ(true, true); -// } -// -// TF_LITE_MICRO_TESTS_END -// ---------------------------------------------------------------------------- -// If you compile this for your platform, you'll get a normal binary that you -// should be able to run. Executing it will output logging information like this -// to stderr (or whatever equivalent is available and written to by -// ErrorReporter): -// ---------------------------------------------------------------------------- -// Testing SomeTest -// 1/1 tests passed -// ~~~ALL TESTS PASSED~~~ -// ---------------------------------------------------------------------------- -// This is designed to be human-readable, so you can just run tests manually, -// but the string "~~~ALL TESTS PASSED~~~" should only appear if all of the -// tests do pass. This makes it possible to integrate with automated test -// systems by scanning the output logs and looking for that magic value. -// -// This framework is intended to be a rudimentary alternative to no testing at -// all on systems that struggle to run more conventional approaches, so use with -// caution! 
-
-#ifndef TENSORFLOW_LITE_MICRO_TESTING_MICRO_TEST_H_
-#define TENSORFLOW_LITE_MICRO_TESTING_MICRO_TEST_H_
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/micro/micro_error_reporter.h"
-
-namespace micro_test {
-extern int tests_passed;
-extern int tests_failed;
-extern bool is_test_complete;
-extern bool did_test_fail;
-extern tflite::ErrorReporter* reporter;
-}  // namespace micro_test
-
-#define TF_LITE_MICRO_TESTS_BEGIN \
-  namespace micro_test { \
-  int tests_passed; \
-  int tests_failed; \
-  bool is_test_complete; \
-  bool did_test_fail; \
-  tflite::ErrorReporter* reporter; \
-  } \
-  \
-  int main(int argc, char** argv) { \
-    micro_test::tests_passed = 0; \
-    micro_test::tests_failed = 0; \
-    tflite::MicroErrorReporter error_reporter; \
-    micro_test::reporter = &error_reporter;
-
-#define TF_LITE_MICRO_TESTS_END \
-  micro_test::reporter->Report( \
-      "%d/%d tests passed", micro_test::tests_passed, \
-      (micro_test::tests_failed + micro_test::tests_passed)); \
-  if (micro_test::tests_failed == 0) { \
-    micro_test::reporter->Report("~~~ALL TESTS PASSED~~~\n"); \
-    return kTfLiteOk; \
-  } else { \
-    micro_test::reporter->Report("~~~SOME TESTS FAILED~~~\n"); \
-    return kTfLiteError; \
-  } \
-  }
-
-// TODO(petewarden): I'm going to hell for what I'm doing to this poor for loop.
-#define TF_LITE_MICRO_TEST(name) \
-  micro_test::reporter->Report("Testing " #name); \
-  for (micro_test::is_test_complete = false, \
-       micro_test::did_test_fail = false; \
-       !micro_test::is_test_complete; micro_test::is_test_complete = true, \
-       micro_test::tests_passed += (micro_test::did_test_fail) ? 0 : 1, \
-       micro_test::tests_failed += (micro_test::did_test_fail) ? 1 : 0)
-
-#define TF_LITE_MICRO_EXPECT(x) \
-  do { \
-    if (!(x)) { \
-      micro_test::reporter->Report(#x " failed at %s:%d", __FILE__, __LINE__); \
-      micro_test::did_test_fail = true; \
-    } \
-  } while (false)
-
-// TODO(b/139142772): this macro is used with types other than ints even though
-// the printf specifier is %d.
-#define TF_LITE_MICRO_EXPECT_EQ(x, y) \
-  do { \
-    auto vx = x; \
-    auto vy = y; \
-    if ((vx) != (vy)) { \
-      micro_test::reporter->Report(#x " == " #y " failed at %s:%d (%d vs %d)", \
-                                   __FILE__, __LINE__, static_cast<int>(vx), \
-                                   static_cast<int>(vy)); \
-      micro_test::did_test_fail = true; \
-    } \
-  } while (false)
-
-#define TF_LITE_MICRO_EXPECT_NE(x, y) \
-  do { \
-    if ((x) == (y)) { \
-      micro_test::reporter->Report(#x " != " #y " failed at %s:%d", __FILE__, \
-                                   __LINE__); \
-      micro_test::did_test_fail = true; \
-    } \
-  } while (false)
-
-// TODO(wangtz): Making it more generic once needed.
-#define TF_LITE_MICRO_ARRAY_ELEMENT_EXPECT_NEAR(arr1, idx1, arr2, idx2, \
-                                                epsilon) \
-  do { \
-    auto delta = ((arr1)[(idx1)] > (arr2)[(idx2)]) \
-                     ? ((arr1)[(idx1)] - (arr2)[(idx2)]) \
-                     : ((arr2)[(idx2)] - (arr1)[(idx1)]); \
-    if (delta > epsilon) { \
-      micro_test::reporter->Report( \
-          #arr1 "[%d] (%f) near " #arr2 "[%d] (%f) failed at %s:%d", \
-          static_cast<int>(idx1), static_cast<float>((arr1)[(idx1)]), \
-          static_cast<int>(idx2), static_cast<float>((arr2)[(idx2)]), \
-          __FILE__, __LINE__); \
-      micro_test::did_test_fail = true; \
-    } \
-  } while (false)
-
-#define TF_LITE_MICRO_EXPECT_NEAR(x, y, epsilon) \
-  do { \
-    auto vx = (x); \
-    auto vy = (y); \
-    auto delta = ((vx) > (vy)) ? ((vx) - (vy)) : ((vy) - (vx)); \
-    if (delta > epsilon) { \
-      micro_test::reporter->Report( \
-          #x " (%f) near " #y " (%f) failed at %s:%d", \
-          static_cast<float>(vx), static_cast<float>(vy), __FILE__, \
-          __LINE__); \
-      micro_test::did_test_fail = true; \
-    } \
-  } while (false)
-
-#define TF_LITE_MICRO_EXPECT_GT(x, y) \
-  do { \
-    if ((x) <= (y)) { \
-      micro_test::reporter->Report(#x " > " #y " failed at %s:%d", __FILE__, \
-                                   __LINE__); \
-      micro_test::did_test_fail = true; \
-    } \
-  } while (false)
-
-#define TF_LITE_MICRO_EXPECT_LT(x, y) \
-  do { \
-    if ((x) >= (y)) { \
-      micro_test::reporter->Report(#x " < " #y " failed at %s:%d", __FILE__, \
-                                   __LINE__); \
-      micro_test::did_test_fail = true; \
-    } \
-  } while (false)
-
-#define TF_LITE_MICRO_EXPECT_GE(x, y) \
-  do { \
-    if ((x) < (y)) { \
-      micro_test::reporter->Report(#x " >= " #y " failed at %s:%d", __FILE__, \
-                                   __LINE__); \
-      micro_test::did_test_fail = true; \
-    } \
-  } while (false)
-
-#define TF_LITE_MICRO_EXPECT_LE(x, y) \
-  do { \
-    if ((x) > (y)) { \
-      micro_test::reporter->Report(#x " <= " #y " failed at %s:%d", __FILE__, \
-                                   __LINE__); \
-      micro_test::did_test_fail = true; \
-    } \
-  } while (false)
-
-#define TF_LITE_MICRO_EXPECT_TRUE(x) \
-  do { \
-    if (!(x)) { \
-      micro_test::reporter->Report(#x " was not true failed at %s:%d", \
-                                   __FILE__, __LINE__); \
-      micro_test::did_test_fail = true; \
-    } \
-  } while (false)
-
-#define TF_LITE_MICRO_EXPECT_FALSE(x) \
-  do { \
-    if (x) { \
-      micro_test::reporter->Report(#x " was not false failed at %s:%d", \
-                                   __FILE__, __LINE__); \
-      micro_test::did_test_fail = true; \
-    } \
-  } while (false)
-
-#define TF_LITE_MICRO_FAIL(msg) \
-  do { \
-    micro_test::reporter->Report("FAIL: %s", msg, __FILE__, __LINE__); \
-    micro_test::did_test_fail = true; \
-  } while (false)
-
-#define TF_LITE_MICRO_EXPECT_STRING_EQ(string1, string2) \
-  do { \
-    for (int i = 0; string1[i] != '\0' && string2[i] != '\0'; i++) { \
-      if (string1[i] != string2[i]) { \
-        micro_test::reporter->Report("FAIL: %s did not match %s", string1, \
-                                     string2, __FILE__, __LINE__); \
-        micro_test::did_test_fail = true; \
-      } \
-    } \
-  } while (false)
-
-#endif  // TENSORFLOW_LITE_MICRO_TESTING_MICRO_TEST_H_
diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/testing/test_conv_model.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/testing/test_conv_model.cc
deleted file mode 100644
index 358479c3..00000000
--- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/testing/test_conv_model.cc
+++ /dev/null
@@ -1,1799 +0,0 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/ - -#include "tensorflow/lite/micro/testing/test_conv_model.h" - -extern const unsigned char kTestConvModelData[] = { - 0x24, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x1c, 0x00, 0x04, 0x00, - 0x08, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x00, - 0x12, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xb4, 0x52, 0x00, 0x00, - 0x3c, 0x42, 0x00, 0x00, 0x24, 0x42, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x0c, 0x00, 0x04, 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x08, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, - 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x0f, 0x00, 0x00, 0x00, - 0xd4, 0x41, 0x00, 0x00, 0xc0, 0x41, 0x00, 0x00, 0x64, 0x41, 0x00, 0x00, - 0xc0, 0x40, 0x00, 0x00, 0x7c, 0x40, 0x00, 0x00, 0x58, 0x40, 0x00, 0x00, - 0x44, 0x13, 0x00, 0x00, 0xa0, 0x12, 0x00, 0x00, 0x8c, 0x00, 0x00, 0x00, - 0x80, 0x00, 0x00, 0x00, 0x6c, 0x00, 0x00, 0x00, 0x58, 0x00, 0x00, 0x00, - 0x44, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xd6, 0xbe, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x31, 0x2e, 0x35, 0x2e, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x94, 0xb2, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0xb2, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xb4, 0xb2, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xc4, 0xb2, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0xb2, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x00, 0x46, 0xbf, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, - 0x00, 0x12, 0x00, 0x00, 0x7d, 0x6a, 0x24, 0xa1, 0xf6, 0xca, 0x70, 0x2f, - 0x8e, 0xb1, 0xe8, 0x15, 0x42, 0x08, 0x32, 0xf6, 0xe9, 0xfb, 0xa0, 0xda, - 0xe4, 0xf1, 0x0a, 0x9d, 0x72, 0x66, 0x88, 0x37, 0xe9, 0x9e, 0x08, 0x54, - 0x61, 0x51, 0x40, 0x93, 0x4d, 0xcf, 0xe2, 0x08, 0x36, 0xad, 0xb1, 0x8e, - 0xfc, 0xe4, 0x02, 0xd1, 0x9a, 0x1e, 0x05, 0x67, 0xa3, 0x3b, 0xa6, 0xde, - 0x5d, 0x2a, 0xcc, 0x8c, 0x3c, 0x2e, 0xd2, 0x15, 0xc2, 0x60, 0xab, 0xea, - 0x73, 0xe4, 0x88, 0xc1, 0x66, 0x21, 0xb0, 0xe5, 0x5b, 0x55, 0xda, 0x69, - 0x2d, 0x0c, 0x66, 0x07, 0x74, 0x36, 0xcd, 0x79, 0x81, 0xf9, 0x5c, 0x2c, - 0xb5, 0x93, 0xab, 0x76, 0xa1, 0x1f, 0x20, 0x90, 0x89, 0xe1, 0x41, 0xc7, - 0x32, 0xc2, 0xa3, 0x03, 0x77, 0x86, 0x79, 0xf7, 0x89, 0xc1, 0xb1, 0x42, - 0x2a, 0x75, 0xc7, 0xc1, 0x2f, 0xbb, 0xf6, 0xe8, 0x23, 0x99, 0x9b, 0x74, - 0x9c, 0xe5, 0x91, 0x15, 0xc6, 0x08, 0x0e, 0xae, 0x7c, 0xd3, 0x27, 0x54, - 0xfb, 0xa7, 0x49, 0x65, 0x52, 0x2f, 0x63, 0x33, 0x8b, 0x5f, 0x67, 0x21, - 0x25, 0xe0, 0xcf, 0x95, 0x03, 0x05, 0x19, 0x0c, 0x3d, 0xfc, 0x95, 0x42, - 0xa9, 0x26, 0x27, 0x54, 0xa3, 0x71, 0xb4, 0x70, 0x7a, 0x40, 0x0d, 0xc1, - 0x72, 0x04, 0x81, 0x3b, 0xb9, 0xb7, 0xd2, 0xc1, 0x4e, 0xf8, 0xff, 0xca, - 0x66, 0xc1, 0xbe, 0xb9, 0x09, 0xbd, 0xb9, 0x2c, 0x5b, 0x97, 0xc3, 0xa8, - 0xf6, 0xc4, 0x23, 0x93, 0x2e, 0xf6, 0xce, 0x2e, 0xdb, 0xfb, 0x8f, 0xb0, - 0xc8, 0xba, 0xfa, 0x97, 0xfd, 0xc0, 0x0a, 0xc8, 0x2c, 0xf3, 0x4c, 0x4d, - 0x8b, 0x3b, 0x47, 0x11, 0xfb, 0xe8, 0x96, 0xe3, 0xcc, 0xef, 0xe4, 0xb5, - 0x07, 0xa1, 0xb7, 0xa9, 
0xf7, 0x98, 0x71, 0x59, 0x9b, 0x5a, 0x7b, 0x88, - 0xe4, 0xcf, 0x9b, 0x55, 0x26, 0xce, 0x59, 0x73, 0x66, 0x17, 0x9c, 0x74, - 0x02, 0xfc, 0x24, 0x01, 0xde, 0x44, 0x98, 0xe3, 0x8b, 0x18, 0x02, 0x42, - 0xf5, 0x0f, 0xbc, 0xcb, 0xf7, 0x37, 0xb1, 0xd5, 0xb4, 0x7c, 0x0a, 0x6a, - 0x59, 0x59, 0xc9, 0x11, 0xd8, 0x0f, 0xf9, 0xab, 0x40, 0xdd, 0x14, 0xf9, - 0x30, 0xaa, 0xf1, 0x8c, 0x6d, 0xbc, 0x4c, 0x5b, 0x71, 0x95, 0xfd, 0x41, - 0x4c, 0xf3, 0xb4, 0x7f, 0x1c, 0xb6, 0x4b, 0x12, 0x3b, 0x6e, 0xc1, 0xce, - 0x6f, 0xf8, 0x57, 0xb7, 0x5e, 0x2a, 0x36, 0x32, 0x3d, 0x85, 0xc6, 0xbf, - 0xd7, 0xab, 0x95, 0x45, 0x62, 0xae, 0xb8, 0xa6, 0x03, 0xcc, 0x21, 0x25, - 0x18, 0x5a, 0xa8, 0x03, 0x27, 0x33, 0x47, 0xb1, 0x7e, 0x0e, 0xbd, 0xc3, - 0x24, 0x25, 0x78, 0x28, 0xa4, 0xe3, 0x5b, 0x08, 0xbf, 0x04, 0xa2, 0xae, - 0x90, 0x4c, 0x96, 0x78, 0xa8, 0xb1, 0xb8, 0x54, 0x89, 0x25, 0x2d, 0x35, - 0x93, 0x95, 0xa5, 0xd3, 0x1a, 0xe6, 0x00, 0x8b, 0xfe, 0x36, 0x0f, 0xd2, - 0x6e, 0xff, 0x86, 0x93, 0x48, 0xb8, 0x08, 0x39, 0x1f, 0x3a, 0x2d, 0xe7, - 0x47, 0x5e, 0x05, 0x66, 0x7a, 0xb8, 0xe4, 0xda, 0xbc, 0x5b, 0x57, 0xdf, - 0xd9, 0x0a, 0xb9, 0x48, 0x5d, 0x0c, 0x57, 0xed, 0x8d, 0xbb, 0x8d, 0x4b, - 0x0e, 0xb8, 0xea, 0x02, 0x06, 0x2f, 0xfd, 0x28, 0x0d, 0x0b, 0xf4, 0xf4, - 0x52, 0x81, 0x77, 0x15, 0x87, 0x53, 0x28, 0xef, 0xbe, 0xc6, 0x4c, 0x45, - 0x3e, 0x1a, 0x6e, 0xbd, 0x10, 0xd8, 0x9a, 0x72, 0x1f, 0x14, 0xe2, 0x37, - 0x08, 0xaf, 0xfa, 0xce, 0xd3, 0x84, 0x23, 0x43, 0x8c, 0x5c, 0xce, 0x1b, - 0xf7, 0xf3, 0xb0, 0x3b, 0xfd, 0x33, 0xf8, 0x09, 0xf1, 0x41, 0xa5, 0xa8, - 0x86, 0x8d, 0x56, 0xde, 0xf6, 0x68, 0xe3, 0x4c, 0x97, 0xa6, 0xc3, 0x66, - 0x9b, 0xa9, 0x8a, 0xbd, 0x59, 0x45, 0xfb, 0xdf, 0xa1, 0x42, 0x10, 0x1c, - 0x55, 0x22, 0x53, 0xe1, 0x32, 0x33, 0xf9, 0xfa, 0xc2, 0x70, 0x0f, 0x49, - 0x15, 0xa7, 0x21, 0xbc, 0x56, 0x35, 0x09, 0x06, 0xe6, 0x5e, 0xc4, 0xc1, - 0x64, 0x93, 0x59, 0x3b, 0x8e, 0xb7, 0x52, 0x6c, 0x4d, 0xa1, 0xb7, 0xee, - 0x14, 0xc2, 0x01, 0x25, 0xbb, 0x5e, 0xe0, 0xc6, 0xa4, 0x4f, 0xb5, 0x20, - 0x88, 0xe0, 0xd7, 0x5e, 0x26, 0x5b, 0x9f, 0xf7, 0xb5, 0x26, 0x5b, 0xfc, - 0xf3, 0x3e, 0xf3, 0x57, 0x6f, 0x9e, 0x9e, 0x51, 0x07, 0x6e, 0xc0, 0x53, - 0x17, 0x89, 0x79, 0xf0, 0x91, 0xb2, 0x54, 0x30, 0x1f, 0x97, 0x95, 0xfc, - 0x02, 0x2d, 0x0c, 0x06, 0xb0, 0x82, 0xad, 0x20, 0xc2, 0xdc, 0x78, 0xbc, - 0xbe, 0x5b, 0x88, 0xa0, 0xdd, 0x45, 0x49, 0x26, 0xec, 0xb4, 0xa5, 0x8b, - 0x7f, 0xdd, 0x40, 0xcf, 0x9e, 0xbe, 0x46, 0x4d, 0x36, 0xab, 0x0a, 0x34, - 0x1a, 0x2a, 0xd0, 0xd3, 0x83, 0x96, 0xff, 0x88, 0xa4, 0xd8, 0x48, 0x75, - 0x2f, 0xcb, 0x3c, 0xc3, 0xbb, 0xc7, 0x2f, 0xe9, 0xf9, 0xa3, 0xde, 0x9d, - 0xbb, 0x5e, 0x37, 0x29, 0xf6, 0x75, 0xcc, 0x85, 0xeb, 0xf9, 0x73, 0xf7, - 0xdc, 0x31, 0x8c, 0x56, 0x52, 0x4a, 0x44, 0xa4, 0x2a, 0x2a, 0x51, 0x49, - 0x77, 0x6d, 0x35, 0x0a, 0xf9, 0x44, 0xaa, 0x36, 0x05, 0xef, 0x1e, 0x6b, - 0xe5, 0x65, 0x6b, 0xaa, 0xc1, 0x41, 0x9c, 0x62, 0xd0, 0x70, 0x78, 0xff, - 0x88, 0xe8, 0x5f, 0x3c, 0x2e, 0x00, 0x6c, 0xe3, 0xdb, 0xc3, 0x54, 0x66, - 0xa9, 0xf4, 0xe2, 0x4c, 0x91, 0x11, 0xc8, 0x3c, 0x39, 0x9b, 0x31, 0x81, - 0xc7, 0x11, 0x22, 0x62, 0xb7, 0x26, 0xa0, 0x0c, 0x2e, 0x6c, 0xe7, 0x34, - 0x3b, 0x1f, 0x27, 0xb3, 0xe5, 0x4f, 0xc9, 0x71, 0xb2, 0x18, 0x99, 0x59, - 0x95, 0xc6, 0x35, 0x4c, 0x5d, 0xa3, 0x59, 0xd1, 0x8b, 0x71, 0xea, 0xe7, - 0x30, 0x3f, 0xe7, 0x8c, 0x1a, 0x59, 0xeb, 0xc5, 0x5d, 0xbd, 0xe6, 0x00, - 0x67, 0x02, 0xfb, 0xca, 0x8d, 0xdf, 0x71, 0xb6, 0xed, 0xc7, 0xd2, 0xf2, - 0x72, 0x1b, 0xd3, 0x63, 0x51, 0x1f, 0x04, 0xe9, 0xf9, 0xe2, 0x38, 0x13, - 0x48, 0x63, 0x19, 0x66, 0x2b, 0x48, 0xc8, 0x1b, 0x9d, 0x19, 0x5a, 0x57, - 0x44, 0x2d, 0x30, 0xb5, 
0xce, 0x3b, 0xcc, 0xae, 0xc4, 0x5e, 0x4e, 0x96, - 0x62, 0x5c, 0x53, 0x1f, 0xbf, 0xbd, 0xc8, 0x9d, 0xcf, 0x81, 0xb3, 0x1e, - 0xb0, 0x22, 0xd5, 0xbe, 0x60, 0x65, 0xd9, 0xeb, 0x11, 0x74, 0x8c, 0x24, - 0x18, 0x67, 0x45, 0xd3, 0xf8, 0x3f, 0xc5, 0xdf, 0xac, 0x65, 0xd4, 0x0c, - 0x82, 0x63, 0xd6, 0x43, 0x94, 0xa0, 0x3b, 0xff, 0x03, 0x0f, 0xbb, 0xe4, - 0x4d, 0x3b, 0x41, 0x9f, 0xf4, 0x1a, 0xa9, 0xdb, 0x15, 0x5b, 0x9a, 0x92, - 0xcb, 0xd5, 0xb8, 0x33, 0x5e, 0xea, 0x28, 0x3d, 0x2d, 0x30, 0x20, 0xcd, - 0xb6, 0x23, 0x18, 0x0e, 0x10, 0x2a, 0xa9, 0xe1, 0xad, 0xbc, 0x96, 0xd1, - 0xf9, 0xf3, 0x95, 0x4f, 0x2a, 0x0b, 0x91, 0xff, 0xf0, 0x96, 0x14, 0x00, - 0xaa, 0xfb, 0x1a, 0x44, 0x21, 0x9b, 0xe8, 0x71, 0x31, 0x9e, 0xd6, 0x58, - 0x7f, 0x02, 0x36, 0x5e, 0x92, 0x8d, 0x93, 0x99, 0xac, 0xb6, 0x87, 0x39, - 0xda, 0x47, 0xef, 0x70, 0xd4, 0xf7, 0x8d, 0x2a, 0xbd, 0x08, 0x40, 0x4d, - 0xec, 0xeb, 0x4e, 0x1b, 0x85, 0x5d, 0x55, 0x64, 0x4c, 0xf3, 0x5e, 0x8f, - 0x68, 0x1e, 0x5e, 0x64, 0xc3, 0xb8, 0x92, 0x24, 0x41, 0x98, 0x78, 0x09, - 0x85, 0x87, 0x17, 0x2c, 0x88, 0x9e, 0x62, 0x86, 0x4f, 0x44, 0x71, 0x9c, - 0xa8, 0x73, 0xb3, 0x14, 0x1f, 0x3c, 0x96, 0x6b, 0xab, 0xad, 0x43, 0xdf, - 0x67, 0x34, 0x66, 0x30, 0x1d, 0x15, 0xd3, 0xe7, 0xd5, 0x8b, 0x00, 0xaa, - 0x11, 0x77, 0xea, 0x36, 0xc9, 0x49, 0x99, 0x93, 0x01, 0x6e, 0x00, 0x4a, - 0x93, 0x08, 0x2c, 0x44, 0x01, 0x91, 0xe0, 0x91, 0xdd, 0xab, 0x70, 0x4b, - 0xe7, 0xbf, 0x2d, 0x0f, 0xd4, 0x52, 0xa0, 0xf1, 0x5d, 0xa0, 0xcc, 0xb9, - 0x1b, 0xa2, 0x62, 0xeb, 0x23, 0x1e, 0x8e, 0xbb, 0x2b, 0xb6, 0xc5, 0x3a, - 0xdf, 0x32, 0x99, 0xde, 0x2e, 0x94, 0xcf, 0x98, 0x99, 0x34, 0x59, 0x60, - 0xcf, 0x57, 0xe0, 0xb0, 0xd9, 0x89, 0xaa, 0xc2, 0x4f, 0x1e, 0x38, 0x88, - 0xca, 0x32, 0x93, 0x9b, 0xa3, 0x2b, 0x17, 0x0b, 0x40, 0x5e, 0x69, 0xbd, - 0x14, 0x15, 0xca, 0x1a, 0x21, 0xdf, 0xa8, 0x4e, 0x14, 0x5e, 0x18, 0x40, - 0xe3, 0x4e, 0x04, 0x1f, 0xe5, 0x81, 0x53, 0x11, 0xae, 0x5e, 0x30, 0xe5, - 0xda, 0xd7, 0xf1, 0x3b, 0x72, 0x1b, 0xa5, 0xe3, 0x13, 0xad, 0x40, 0x54, - 0xae, 0xf0, 0xbc, 0x2b, 0xc1, 0x1a, 0x9c, 0xdd, 0xe1, 0xd0, 0x12, 0x10, - 0xfd, 0x59, 0xce, 0x36, 0x60, 0x86, 0xa0, 0xa7, 0xee, 0xe1, 0x02, 0xe6, - 0xf8, 0xf0, 0x5c, 0x4f, 0xa3, 0xa4, 0xe4, 0x09, 0xb9, 0xc3, 0x84, 0xe3, - 0x8d, 0x97, 0x21, 0x62, 0xf3, 0x11, 0x47, 0xb1, 0x4a, 0xce, 0x5b, 0x89, - 0xde, 0x86, 0xb5, 0x0e, 0xba, 0xbc, 0x8c, 0xcf, 0x54, 0x38, 0x3a, 0xc6, - 0xaf, 0x8c, 0x4d, 0x9d, 0xff, 0x58, 0x9b, 0xe8, 0x32, 0xb7, 0xa2, 0x29, - 0xad, 0x91, 0x3a, 0xa5, 0xc7, 0x54, 0xff, 0xd8, 0x47, 0x4f, 0x8f, 0x38, - 0x91, 0x12, 0x76, 0xa3, 0x2e, 0xf7, 0xdd, 0xba, 0xa7, 0xd4, 0x49, 0xe5, - 0xd1, 0x74, 0xe9, 0x2a, 0x29, 0xe4, 0x64, 0xb9, 0x58, 0x98, 0x0c, 0xe5, - 0x1f, 0xb2, 0x0e, 0x33, 0xea, 0xf8, 0x2e, 0xb1, 0x22, 0x46, 0xc2, 0x67, - 0x2d, 0xfe, 0x2e, 0xd3, 0xcf, 0xbc, 0x64, 0x7b, 0x75, 0x24, 0x53, 0x1c, - 0x42, 0x8c, 0x0b, 0x99, 0x9e, 0xa7, 0xa6, 0xb9, 0xfb, 0x5d, 0x86, 0x9f, - 0xe9, 0x04, 0x62, 0xb2, 0x42, 0x81, 0xa2, 0x0d, 0x60, 0x83, 0x40, 0xbb, - 0x21, 0x10, 0xdf, 0xaa, 0xe6, 0x6c, 0x72, 0xc5, 0xb1, 0xad, 0x9f, 0xd2, - 0x91, 0xf8, 0xb6, 0x56, 0xfb, 0x2e, 0xb3, 0xc4, 0x12, 0xd9, 0x86, 0x29, - 0x6c, 0x55, 0x88, 0x72, 0xba, 0xfb, 0x9b, 0xb9, 0x6f, 0x2d, 0x7d, 0x75, - 0xd0, 0x9d, 0xaf, 0x44, 0xb6, 0xbd, 0x7b, 0xec, 0x78, 0xf1, 0xbf, 0x66, - 0xe8, 0x79, 0x66, 0x16, 0x5e, 0xf9, 0x68, 0x89, 0x5b, 0xde, 0x8f, 0xf9, - 0xeb, 0x04, 0x0b, 0x6a, 0x71, 0xa1, 0x3b, 0x46, 0x03, 0xb4, 0x29, 0xa9, - 0x31, 0xf4, 0xc5, 0xd3, 0x43, 0x6d, 0x88, 0x43, 0xa8, 0xef, 0xb7, 0xd7, - 0x75, 0x6b, 0x83, 0x35, 0xb6, 0x2f, 0xe0, 0x5f, 0xf2, 0x14, 0xcd, 0xd0, - 0x06, 0xb3, 0x5e, 0x8b, 
0xdb, 0x86, 0x11, 0x94, 0x2f, 0xfb, 0x92, 0x19, - 0x52, 0x7f, 0xcb, 0xe5, 0x22, 0x27, 0x5f, 0xe4, 0x68, 0xb2, 0xcb, 0xc7, - 0xb8, 0xec, 0xfd, 0x9e, 0x39, 0x9c, 0x5b, 0xe4, 0xae, 0xca, 0x83, 0x19, - 0xcf, 0xf0, 0x01, 0xe3, 0xfc, 0xb0, 0x28, 0xda, 0x79, 0x84, 0xfb, 0xfe, - 0xa5, 0xb6, 0xb3, 0xd2, 0x73, 0xd3, 0x11, 0xe5, 0xdf, 0x7a, 0xd7, 0x82, - 0x78, 0x25, 0x06, 0x5b, 0x0f, 0x89, 0x9d, 0x0b, 0x9b, 0xd1, 0x1b, 0xc5, - 0xb7, 0x67, 0xef, 0x7c, 0xa2, 0xa3, 0xca, 0x27, 0xd0, 0x59, 0xb9, 0x99, - 0x86, 0xa9, 0xf6, 0x9a, 0x28, 0xf0, 0xbb, 0x42, 0xd2, 0xa0, 0xa8, 0x01, - 0x29, 0xa1, 0x0c, 0x1b, 0x33, 0x1b, 0x9c, 0xcb, 0xe4, 0x6c, 0x61, 0x0a, - 0xc4, 0xd7, 0x6c, 0xec, 0x86, 0xb3, 0xd2, 0xaa, 0x8c, 0xab, 0x1a, 0xf4, - 0x03, 0x2e, 0x2b, 0x42, 0xbe, 0xc1, 0x31, 0x1d, 0x57, 0x47, 0xdc, 0x7b, - 0xb5, 0x8f, 0x8b, 0xdf, 0x06, 0xad, 0x3f, 0xf4, 0x4f, 0xb5, 0x52, 0x07, - 0x4e, 0x25, 0xb3, 0x73, 0x34, 0x92, 0x6a, 0x89, 0x93, 0x28, 0x8b, 0x96, - 0x9d, 0xdb, 0xb4, 0x77, 0x81, 0x76, 0x86, 0xd2, 0xa5, 0x94, 0x76, 0x35, - 0xc9, 0x66, 0x4e, 0xd8, 0xc5, 0xc3, 0xc9, 0x34, 0xaf, 0xad, 0x4a, 0x7c, - 0x92, 0x24, 0xb1, 0x7d, 0x7d, 0xac, 0xf6, 0xcb, 0x8f, 0x36, 0xc1, 0xb2, - 0x63, 0x78, 0x99, 0x33, 0x23, 0x68, 0x6e, 0x71, 0x6a, 0xcc, 0x05, 0xf9, - 0x41, 0x92, 0x30, 0xf0, 0xb1, 0xb4, 0xa6, 0x46, 0x86, 0x62, 0xd9, 0xd9, - 0x94, 0x8a, 0xb2, 0x9c, 0x68, 0xff, 0xf4, 0x3a, 0x2e, 0xaf, 0xee, 0xcf, - 0x04, 0x94, 0x53, 0x35, 0x25, 0xf9, 0xaa, 0x74, 0x93, 0xf3, 0x63, 0xc0, - 0xd2, 0x22, 0x30, 0x8c, 0xde, 0xa6, 0xb1, 0xb4, 0xa1, 0x56, 0x07, 0x06, - 0x71, 0xa2, 0x9e, 0x42, 0x31, 0xa3, 0x1e, 0xa6, 0x9a, 0xbc, 0x9f, 0x5b, - 0x12, 0x3c, 0xc2, 0x74, 0xf9, 0x61, 0x71, 0xef, 0x73, 0x86, 0xc2, 0x3b, - 0x25, 0x8a, 0x31, 0x72, 0x27, 0xac, 0xa4, 0x72, 0xf3, 0xbb, 0x78, 0x2c, - 0x94, 0xed, 0xa8, 0x3a, 0x42, 0x98, 0x34, 0xda, 0x3e, 0x60, 0x1c, 0x4a, - 0xec, 0x6b, 0x4e, 0x5f, 0x2a, 0x62, 0xb9, 0xad, 0xc9, 0xd9, 0x38, 0x90, - 0xa7, 0x3b, 0xd3, 0x1a, 0xbb, 0x81, 0x0d, 0x33, 0xd9, 0x16, 0x35, 0x8e, - 0xc3, 0x88, 0x36, 0xfa, 0x3e, 0xa8, 0x4f, 0x30, 0x9d, 0xf1, 0x08, 0xea, - 0x40, 0x1b, 0x87, 0x4d, 0x23, 0x8e, 0x8e, 0xb0, 0xe2, 0xf0, 0x27, 0xc1, - 0xdc, 0x0d, 0xe2, 0x8f, 0x93, 0xef, 0x8b, 0xd1, 0x19, 0xa5, 0xbe, 0xd7, - 0x5a, 0x8a, 0x38, 0x62, 0x43, 0xba, 0x74, 0xf8, 0xae, 0x11, 0x1f, 0x1d, - 0xa4, 0x6e, 0x70, 0x94, 0x91, 0x14, 0xf4, 0xff, 0xbe, 0x39, 0xb4, 0x33, - 0xc2, 0x87, 0x74, 0x1b, 0xfd, 0x9a, 0xa8, 0x64, 0x09, 0x4b, 0x7f, 0x95, - 0x0a, 0xcb, 0x6b, 0x15, 0x54, 0x1d, 0xc6, 0x03, 0x1d, 0x1b, 0x25, 0x56, - 0x15, 0xb5, 0xd7, 0xe5, 0xd6, 0xf3, 0x28, 0xa4, 0xde, 0x1b, 0x39, 0x0d, - 0x59, 0x26, 0x12, 0xe4, 0x32, 0xf2, 0x25, 0xeb, 0xc0, 0xdb, 0x58, 0xe5, - 0xce, 0x64, 0x6f, 0x70, 0x74, 0xc1, 0xc9, 0xbd, 0x75, 0xef, 0x16, 0x02, - 0xdf, 0x27, 0x09, 0xc8, 0xb8, 0x37, 0x8f, 0x44, 0x0d, 0x58, 0x48, 0xf5, - 0xc2, 0x53, 0x21, 0x28, 0x16, 0xa4, 0x56, 0x02, 0xdf, 0xa7, 0x97, 0xa4, - 0x5c, 0x48, 0x75, 0x51, 0x89, 0x0b, 0xa7, 0x4d, 0xd9, 0x9e, 0x04, 0x4e, - 0x5d, 0x6c, 0xe5, 0x1f, 0x68, 0x88, 0xcc, 0xb7, 0x9a, 0x20, 0x05, 0x83, - 0x82, 0x6c, 0xfd, 0xdb, 0x07, 0x6c, 0xec, 0x61, 0xaa, 0x36, 0x57, 0x68, - 0x01, 0xf2, 0x70, 0xfe, 0xe6, 0x4d, 0xe1, 0xa9, 0xb6, 0xb6, 0x52, 0xe6, - 0x20, 0x52, 0x0f, 0x27, 0x9a, 0x1c, 0x2d, 0x20, 0x9b, 0xd4, 0x07, 0xd3, - 0xf6, 0x85, 0x4b, 0xf2, 0x52, 0x4d, 0x4c, 0xd7, 0xf0, 0x32, 0x5d, 0x2e, - 0xef, 0xa2, 0xd0, 0xcd, 0x48, 0x89, 0xbc, 0x9f, 0xcb, 0x37, 0x02, 0x29, - 0xa5, 0xdb, 0xab, 0xfa, 0x1d, 0xf4, 0x53, 0x78, 0x30, 0xde, 0x2c, 0x5c, - 0x35, 0x7f, 0x3d, 0xe1, 0xe0, 0xce, 0xdb, 0x13, 0xca, 0x2a, 0xae, 0xdf, - 0x1c, 0xb1, 0xb6, 0xb9, 
0x6a, 0x9f, 0x28, 0xb0, 0x54, 0x5a, 0x00, 0xdd, - 0x76, 0x14, 0xfb, 0x17, 0xc2, 0x2a, 0x45, 0xa2, 0x18, 0xbb, 0x8a, 0x3e, - 0xbe, 0x0e, 0xa5, 0x1b, 0x3c, 0x70, 0x56, 0x10, 0x98, 0xec, 0xc6, 0x3a, - 0x95, 0x2a, 0x96, 0x6a, 0x44, 0xef, 0xd9, 0x9c, 0x2a, 0x45, 0xb4, 0x15, - 0xf8, 0x2e, 0x03, 0x5d, 0x8c, 0x79, 0xfb, 0xb0, 0x53, 0x71, 0xcd, 0x0d, - 0xf4, 0xe2, 0xfc, 0x3b, 0x71, 0xee, 0x30, 0xf2, 0x29, 0xd3, 0xaa, 0x18, - 0x7a, 0x45, 0x1d, 0x99, 0x6d, 0x2f, 0x1f, 0x2d, 0x32, 0x23, 0x48, 0xc2, - 0x69, 0x33, 0x3d, 0x04, 0xa7, 0xa3, 0x96, 0xb5, 0x76, 0x5b, 0x4e, 0xb7, - 0x3c, 0x10, 0x58, 0x17, 0xf4, 0x5f, 0xec, 0x51, 0x6d, 0x5a, 0x3b, 0x7f, - 0x1e, 0x0e, 0xbb, 0xbf, 0x77, 0x43, 0xf7, 0xa4, 0x57, 0xc0, 0x33, 0xac, - 0xc1, 0xe3, 0x3e, 0x1f, 0x65, 0x3c, 0x62, 0x19, 0x46, 0x2d, 0x7b, 0x2d, - 0x07, 0x44, 0x48, 0xf4, 0x91, 0xdf, 0x59, 0x32, 0x10, 0xf7, 0x12, 0xe2, - 0xe5, 0x39, 0x70, 0x37, 0xa4, 0x79, 0x9a, 0x17, 0x19, 0xe8, 0x90, 0xe7, - 0x37, 0x0d, 0xb6, 0x6d, 0x58, 0xe6, 0x7e, 0x57, 0x76, 0x8a, 0xe8, 0xd0, - 0x76, 0x30, 0x25, 0xda, 0xb6, 0xdf, 0x59, 0x3c, 0x6c, 0x20, 0x65, 0x88, - 0xd2, 0x60, 0x5e, 0x39, 0xb6, 0x6b, 0xac, 0xa2, 0x25, 0xc6, 0xa7, 0xb1, - 0x2f, 0xbb, 0x1d, 0x23, 0xee, 0x02, 0x08, 0x1d, 0xd6, 0x6c, 0x0e, 0xbc, - 0xea, 0xd2, 0xc2, 0x70, 0x34, 0xe9, 0x96, 0xd3, 0xf3, 0xf4, 0x8e, 0x94, - 0x6f, 0x86, 0x76, 0xe7, 0x38, 0x08, 0x6f, 0x47, 0xf5, 0xcd, 0xab, 0xad, - 0x7a, 0x39, 0x10, 0x9a, 0xa8, 0x44, 0xba, 0x2d, 0x7f, 0x05, 0x1e, 0xb7, - 0x44, 0xd8, 0x10, 0x05, 0xd1, 0x8d, 0x98, 0x09, 0x14, 0xbb, 0x6b, 0x2b, - 0xf7, 0xeb, 0x9f, 0xa5, 0x65, 0x4b, 0x21, 0xff, 0xaf, 0xe8, 0x2e, 0x34, - 0x52, 0x38, 0xcf, 0xd5, 0x51, 0x29, 0x2c, 0x91, 0x43, 0x3a, 0x49, 0x42, - 0xdd, 0xfb, 0x0e, 0xd2, 0x77, 0x8f, 0x65, 0x93, 0x3e, 0x52, 0x22, 0x58, - 0xd6, 0xf9, 0xd9, 0x58, 0xd4, 0x06, 0xa9, 0x0c, 0x79, 0x9f, 0x1b, 0xa5, - 0x45, 0x61, 0xd8, 0x4e, 0xbf, 0x4b, 0x51, 0xe2, 0xfb, 0x6f, 0x58, 0xee, - 0xc5, 0xa5, 0x11, 0xbd, 0x99, 0x25, 0x14, 0xac, 0x94, 0x0e, 0xd1, 0xf7, - 0x54, 0xb6, 0x05, 0x8c, 0xc3, 0x57, 0xa5, 0x3c, 0x3c, 0xa6, 0x83, 0x47, - 0x38, 0xd1, 0x6a, 0xab, 0x12, 0xc0, 0xd3, 0x7f, 0x96, 0x55, 0xd7, 0xf4, - 0x3a, 0xd0, 0x08, 0x85, 0x5f, 0x3d, 0x65, 0x8e, 0xbb, 0xea, 0x34, 0xf3, - 0x53, 0x96, 0x71, 0x08, 0x9b, 0x50, 0xe9, 0x4b, 0xce, 0x8a, 0x2f, 0xef, - 0xe4, 0xb2, 0x72, 0x68, 0xcb, 0x88, 0xa8, 0xd9, 0xd9, 0xa2, 0xfc, 0x62, - 0xe8, 0x8b, 0x23, 0x2b, 0xbc, 0xf0, 0x9e, 0xb4, 0xd0, 0x40, 0x8b, 0x45, - 0xff, 0x6d, 0x37, 0x01, 0xa6, 0x4b, 0x62, 0xe0, 0x3b, 0x4e, 0x18, 0x67, - 0xb3, 0x97, 0x04, 0xa0, 0x2a, 0xf2, 0x11, 0x79, 0x38, 0xb4, 0xb2, 0xed, - 0x64, 0xc1, 0x1e, 0xfe, 0xc4, 0xf4, 0xe2, 0x4d, 0x94, 0xb4, 0x17, 0x52, - 0x1a, 0x63, 0xe6, 0x56, 0x8a, 0x41, 0x0a, 0x5b, 0xa2, 0x1c, 0x59, 0xef, - 0x17, 0x64, 0xf9, 0xf7, 0x2c, 0xa4, 0xfd, 0x66, 0xf7, 0xe3, 0xae, 0xa0, - 0x54, 0x36, 0x64, 0x26, 0x84, 0x51, 0x49, 0xd5, 0x3a, 0x5e, 0x2c, 0xc5, - 0xca, 0xde, 0x8e, 0xe7, 0x25, 0x59, 0xb3, 0x9a, 0xb2, 0xf0, 0xff, 0xf1, - 0x83, 0xe5, 0x70, 0xc3, 0xef, 0x63, 0x66, 0x31, 0x04, 0x4d, 0x42, 0xf1, - 0xd9, 0x4c, 0x5e, 0x29, 0x92, 0x37, 0x8d, 0xd1, 0x18, 0x2a, 0x9e, 0x3c, - 0xcc, 0x05, 0xb9, 0xc4, 0xb6, 0xe7, 0x2a, 0x09, 0x3a, 0x68, 0xb5, 0x61, - 0x60, 0x36, 0x11, 0x02, 0x92, 0xf8, 0xa0, 0x56, 0x9b, 0xe8, 0xfe, 0xac, - 0x87, 0xcc, 0xaf, 0xb9, 0x62, 0xa7, 0x1e, 0x99, 0xb8, 0x9f, 0x47, 0xf7, - 0xa5, 0x12, 0x47, 0x66, 0xeb, 0xd6, 0x3a, 0x6f, 0xb3, 0x26, 0x63, 0xe2, - 0xec, 0x0c, 0xba, 0x7d, 0xc2, 0x9b, 0xb2, 0x10, 0x62, 0x03, 0x3f, 0x20, - 0xed, 0x7a, 0xce, 0x47, 0xd0, 0x50, 0x5b, 0x5c, 0x66, 0xbf, 0x01, 0x09, - 0x84, 0x0b, 0x71, 0xa8, 
0x1f, 0x8d, 0xe1, 0x05, 0x09, 0xb4, 0xd5, 0x34, - 0xf1, 0xba, 0x31, 0xc6, 0x76, 0x8e, 0x00, 0x96, 0x3d, 0x6b, 0xe4, 0x66, - 0x3a, 0x22, 0xcd, 0x7f, 0x9d, 0xf8, 0x64, 0xfc, 0x76, 0x42, 0x88, 0x0e, - 0x32, 0xa5, 0xd0, 0x69, 0x56, 0xe2, 0xa5, 0x6f, 0xbb, 0xfa, 0xd8, 0xde, - 0xb4, 0x23, 0xa9, 0xc7, 0x9a, 0xc1, 0x99, 0xa7, 0x7f, 0x79, 0x58, 0xe1, - 0xe7, 0xc5, 0x56, 0x36, 0xc0, 0xfb, 0x8d, 0x8f, 0xe4, 0x6c, 0x96, 0x89, - 0xcb, 0xb0, 0xb0, 0x6e, 0xee, 0x20, 0x46, 0xd3, 0x43, 0x83, 0xac, 0x39, - 0x7c, 0x25, 0xba, 0x69, 0x3a, 0x58, 0x8a, 0x48, 0x0a, 0xf7, 0xb7, 0xfc, - 0x58, 0x7b, 0x93, 0x8b, 0xcd, 0x81, 0x7e, 0x94, 0xe0, 0xdf, 0xb1, 0xca, - 0xf6, 0x60, 0x54, 0xa9, 0x6e, 0xc6, 0x7f, 0xac, 0xfb, 0x62, 0xfe, 0xd9, - 0xd5, 0xf4, 0x6c, 0x62, 0x65, 0xf6, 0x0b, 0x24, 0x49, 0x1d, 0x55, 0xd6, - 0x4c, 0x0b, 0x5a, 0xf1, 0x2e, 0x78, 0x7a, 0x4e, 0xc1, 0xd0, 0xdb, 0xfe, - 0xd2, 0x84, 0x60, 0x68, 0x51, 0x8e, 0x3f, 0xf1, 0xa8, 0x90, 0xbf, 0xda, - 0x86, 0xda, 0x41, 0xd8, 0x90, 0x7b, 0xc3, 0xc8, 0x9e, 0xa5, 0x77, 0x06, - 0x56, 0x02, 0x13, 0x59, 0xaa, 0x89, 0xf9, 0xd5, 0x3c, 0x1d, 0xe2, 0xa9, - 0xb1, 0xc8, 0x02, 0x5a, 0x1c, 0xae, 0x72, 0x66, 0xdf, 0xb4, 0x1a, 0xb7, - 0xd2, 0x4d, 0xda, 0x4f, 0xc9, 0xed, 0x88, 0x7d, 0x9b, 0xc4, 0x4a, 0x8c, - 0x5e, 0x77, 0xaf, 0xd6, 0xd3, 0xbb, 0x38, 0xd2, 0xfa, 0x85, 0xe4, 0xdd, - 0xe7, 0x6e, 0xcb, 0x0b, 0x34, 0x1e, 0xa8, 0xfd, 0xf4, 0xd2, 0xc3, 0xdd, - 0xe0, 0xa6, 0xb1, 0x78, 0x16, 0x85, 0x2b, 0x1b, 0x22, 0xa6, 0xd5, 0x93, - 0x4f, 0xa1, 0xd5, 0x10, 0x96, 0xab, 0x38, 0xa7, 0x3c, 0xf2, 0xbd, 0xd9, - 0x7c, 0x59, 0x71, 0x25, 0x6f, 0x7c, 0xce, 0x73, 0x8e, 0x4e, 0xfb, 0x5a, - 0x30, 0x24, 0x53, 0xc5, 0xa3, 0x20, 0x13, 0x03, 0xfc, 0x7a, 0xaf, 0x1f, - 0x71, 0x5d, 0x6b, 0xce, 0x2e, 0x92, 0x16, 0x4d, 0xab, 0x96, 0x10, 0xc0, - 0xf6, 0x3c, 0xfe, 0x51, 0x89, 0x4d, 0x39, 0x45, 0x2c, 0x92, 0x5a, 0x86, - 0x24, 0xce, 0xbc, 0x75, 0xc6, 0x7f, 0x0e, 0xc2, 0xd1, 0xe7, 0x6a, 0x75, - 0x30, 0x59, 0xfb, 0xbf, 0x6b, 0xcf, 0x60, 0x90, 0x07, 0x73, 0xb1, 0x47, - 0x6e, 0x5d, 0xcd, 0x44, 0xac, 0xee, 0x2a, 0xdb, 0x16, 0x5a, 0x1a, 0xaf, - 0xba, 0xf8, 0x64, 0xdd, 0xdd, 0xed, 0x46, 0x4b, 0x67, 0xf3, 0xf8, 0x2d, - 0x22, 0xe9, 0x25, 0x74, 0x4c, 0x70, 0xe0, 0x3d, 0xbc, 0x11, 0xd3, 0x56, - 0xec, 0x86, 0x39, 0x89, 0x4c, 0xf2, 0xbc, 0x39, 0xdc, 0xde, 0x5f, 0x3b, - 0x42, 0xcb, 0xf6, 0x0c, 0x49, 0x8c, 0x66, 0x76, 0x58, 0x28, 0xe8, 0x47, - 0x59, 0x40, 0x11, 0xef, 0xb5, 0x9d, 0x93, 0xe5, 0x39, 0x56, 0x62, 0x0d, - 0xd0, 0xdd, 0xbb, 0x51, 0xff, 0x87, 0xa3, 0xd1, 0x9e, 0x0e, 0x0c, 0xbd, - 0x8e, 0xfc, 0xa5, 0x44, 0xc7, 0x6d, 0x35, 0x1d, 0x69, 0x14, 0x5b, 0x0d, - 0x45, 0xff, 0x85, 0x2d, 0xd1, 0x14, 0xf4, 0x5e, 0x5b, 0x49, 0x85, 0xad, - 0x69, 0xf1, 0x34, 0x9e, 0x7a, 0xf3, 0xed, 0x2d, 0xf2, 0x5f, 0x70, 0x5a, - 0xc1, 0xca, 0x63, 0xb5, 0xec, 0x49, 0xfc, 0x88, 0xcb, 0x0f, 0x81, 0x1d, - 0xd4, 0x2f, 0x18, 0xf6, 0xfe, 0x71, 0x51, 0xe2, 0x25, 0x71, 0x48, 0xa4, - 0xb2, 0x9f, 0x4f, 0xc0, 0xa5, 0x24, 0x12, 0x5b, 0xf8, 0xf2, 0xcf, 0x6e, - 0x52, 0x52, 0x6a, 0xee, 0x7d, 0xa5, 0x9b, 0xdb, 0x9c, 0xc9, 0x35, 0x30, - 0x1a, 0xf0, 0x7d, 0xcc, 0x98, 0x73, 0x09, 0x16, 0x8c, 0x05, 0x8d, 0x70, - 0xa3, 0x15, 0xd6, 0x7a, 0xa0, 0x7c, 0xd5, 0xcc, 0xd3, 0x29, 0x32, 0x2e, - 0xa5, 0xde, 0xf6, 0xd3, 0xa4, 0x03, 0x59, 0x6c, 0x05, 0x2d, 0x0e, 0x8b, - 0xb7, 0x1f, 0xa0, 0x57, 0x5c, 0x76, 0xde, 0x81, 0xcb, 0x64, 0xb9, 0x73, - 0xc1, 0x3b, 0x26, 0xba, 0x16, 0xdb, 0xe6, 0x40, 0x23, 0xa4, 0xe9, 0x24, - 0x48, 0xb8, 0x73, 0x23, 0x67, 0xbf, 0x26, 0xca, 0x95, 0x4f, 0xa0, 0x60, - 0x95, 0xa2, 0x0f, 0x29, 0xed, 0x5d, 0x71, 0x66, 0x94, 0xa3, 0xd0, 0x2a, - 0x4e, 0x17, 0x32, 0x18, 
0xe6, 0xd6, 0x75, 0x84, 0xa5, 0x2a, 0x72, 0x18, - 0x60, 0x85, 0xde, 0x66, 0x22, 0x52, 0xf6, 0x45, 0xd6, 0xf0, 0xed, 0x93, - 0x0f, 0x5a, 0xa9, 0x12, 0x2a, 0xc4, 0xa8, 0x3d, 0x97, 0xc9, 0xc7, 0x84, - 0x71, 0x14, 0xb3, 0x54, 0xb6, 0xf7, 0x92, 0x7a, 0xc0, 0x6e, 0x02, 0xf7, - 0x48, 0xdb, 0x7c, 0xc1, 0x45, 0x21, 0xdb, 0x1b, 0x51, 0xc3, 0xea, 0xc0, - 0x19, 0x31, 0xe4, 0x6c, 0x20, 0x5f, 0x08, 0xe7, 0x88, 0xf7, 0xc0, 0x6e, - 0xee, 0x5f, 0x20, 0x33, 0x68, 0xef, 0xc5, 0x33, 0x1b, 0x40, 0x66, 0xc5, - 0xa3, 0x68, 0xdb, 0xbc, 0x8a, 0xb7, 0x54, 0xdb, 0xc7, 0xc5, 0x2c, 0x42, - 0x65, 0x51, 0xab, 0x56, 0x94, 0x73, 0xec, 0xd9, 0x95, 0xfa, 0x6a, 0x56, - 0xef, 0x22, 0x95, 0xa4, 0x75, 0x46, 0xee, 0x60, 0x8b, 0x25, 0xa6, 0x92, - 0x0a, 0x8e, 0xc1, 0x39, 0x97, 0x69, 0xa9, 0x19, 0x97, 0xf1, 0x0f, 0x61, - 0xc2, 0x40, 0x7d, 0x62, 0xe9, 0x5e, 0x22, 0x1f, 0x27, 0xe5, 0xc7, 0xe7, - 0xa4, 0x35, 0x5d, 0x90, 0xc7, 0x38, 0x38, 0x2d, 0xb0, 0x1e, 0x29, 0x0f, - 0x4f, 0x08, 0x8b, 0xdd, 0x69, 0x3c, 0x5c, 0x03, 0xbe, 0x9a, 0x76, 0xba, - 0x91, 0xf5, 0x57, 0x07, 0x39, 0xfe, 0x09, 0xfc, 0x01, 0x7b, 0x37, 0xc4, - 0x73, 0x7f, 0x76, 0x50, 0x76, 0xae, 0x6e, 0x4b, 0x22, 0x2c, 0x3b, 0xe7, - 0x77, 0x19, 0x9a, 0x92, 0x26, 0xdf, 0xc4, 0xe6, 0xd8, 0x57, 0xc1, 0x7f, - 0x65, 0x0b, 0xfb, 0xfa, 0xdd, 0xd2, 0x8c, 0xc7, 0xb1, 0x72, 0x2a, 0xb2, - 0x5a, 0xfa, 0xb2, 0x84, 0xb1, 0xec, 0x79, 0x9e, 0xde, 0xd8, 0x2f, 0xdf, - 0x3b, 0x39, 0x0b, 0xac, 0xfa, 0xb8, 0x07, 0x38, 0xff, 0x2e, 0x22, 0x2b, - 0xc9, 0x31, 0x3b, 0x09, 0x05, 0xd2, 0x06, 0xc4, 0x2d, 0x22, 0x1c, 0x21, - 0x70, 0x03, 0x93, 0xd1, 0x3a, 0x8d, 0x94, 0x60, 0xfe, 0x99, 0x13, 0xc3, - 0x00, 0x03, 0x41, 0xfa, 0x50, 0x79, 0x31, 0xeb, 0xf0, 0xf4, 0x06, 0x7a, - 0x19, 0xe8, 0x90, 0xdf, 0x61, 0x4d, 0x5f, 0xe3, 0x99, 0x1b, 0xca, 0xbf, - 0xcf, 0xae, 0xca, 0xfa, 0x84, 0x63, 0x88, 0x56, 0x1d, 0x52, 0x5a, 0x21, - 0xf9, 0xcd, 0xa3, 0x30, 0x16, 0xb9, 0x0d, 0xe1, 0x87, 0x08, 0x78, 0xa2, - 0xdb, 0x7e, 0x16, 0x82, 0x48, 0x48, 0x17, 0x1a, 0xa8, 0x3f, 0xc7, 0x4d, - 0xfd, 0x99, 0x2b, 0x36, 0xbf, 0x08, 0xb9, 0xeb, 0xa6, 0xbf, 0xb6, 0xa0, - 0x9e, 0x26, 0x15, 0xac, 0xd2, 0x65, 0xc9, 0x36, 0x41, 0xe3, 0x59, 0x4e, - 0xdc, 0x7b, 0x58, 0x3b, 0x47, 0x0b, 0xc9, 0xf3, 0xb3, 0xf9, 0x81, 0x33, - 0x39, 0xca, 0xf8, 0x97, 0x2d, 0x9b, 0x24, 0x33, 0x69, 0xbe, 0x1b, 0x81, - 0x59, 0x59, 0x17, 0xed, 0x7d, 0x5b, 0xbe, 0xda, 0xeb, 0x4e, 0x5d, 0x5d, - 0x70, 0x13, 0x3c, 0x4b, 0x4a, 0xfc, 0xa4, 0xbe, 0xa0, 0x5d, 0xa2, 0xed, - 0xe8, 0x8d, 0xf8, 0xf2, 0xa5, 0xdd, 0xd4, 0x49, 0x45, 0x04, 0xef, 0x18, - 0x9f, 0xa1, 0xf7, 0xc4, 0x3b, 0xc2, 0x6b, 0xe0, 0x45, 0xa8, 0x76, 0x39, - 0x49, 0x32, 0xec, 0xc3, 0xcb, 0x45, 0x46, 0xd2, 0x4b, 0x3a, 0x55, 0xe5, - 0xce, 0x08, 0xc4, 0x84, 0xe5, 0xd9, 0xb3, 0xf3, 0xc4, 0xa8, 0xe9, 0x88, - 0x83, 0xd5, 0x56, 0xe1, 0xa6, 0xef, 0x41, 0x55, 0xb0, 0x3f, 0xa3, 0xc1, - 0xbe, 0x3b, 0x83, 0xd6, 0x92, 0x90, 0x38, 0xd3, 0xf3, 0x75, 0xf6, 0x49, - 0x95, 0xee, 0xa9, 0xed, 0xaa, 0xf8, 0xb9, 0x14, 0x0e, 0x6a, 0x48, 0x9d, - 0xc5, 0x48, 0x3b, 0x5e, 0x61, 0xd3, 0x8c, 0x4a, 0x10, 0x12, 0x7c, 0x0a, - 0xf7, 0xaf, 0x62, 0x2d, 0xd3, 0x89, 0x8d, 0x75, 0x19, 0x6b, 0x62, 0x4b, - 0x1a, 0x04, 0xc7, 0xd3, 0x32, 0x17, 0x2f, 0x5f, 0x29, 0xfa, 0xb1, 0x8d, - 0x78, 0xe7, 0x27, 0xf6, 0x67, 0x7e, 0x17, 0xa3, 0x18, 0xdc, 0x13, 0x08, - 0x1e, 0x4b, 0xc7, 0x8e, 0xf6, 0xba, 0x90, 0xb3, 0x32, 0x42, 0x37, 0x6b, - 0x60, 0xa9, 0x23, 0xb5, 0x89, 0x57, 0x7b, 0xdb, 0x98, 0x35, 0x1f, 0x95, - 0x86, 0xa5, 0x83, 0x36, 0xd1, 0x8c, 0x8e, 0xc0, 0x77, 0x5c, 0x40, 0x8e, - 0xec, 0xdf, 0x25, 0x69, 0x0a, 0x83, 0x8f, 0xdf, 0x91, 0x52, 0x31, 0xab, - 0xd5, 0x61, 0x37, 0xbd, 
0x83, 0x1d, 0x4c, 0x8b, 0xa1, 0x4a, 0x81, 0x8b, - 0xa0, 0xf4, 0x41, 0xbd, 0x54, 0x36, 0x36, 0x56, 0x6d, 0x4c, 0xe7, 0xd9, - 0xc7, 0x09, 0xd9, 0x4b, 0xf0, 0x54, 0x45, 0x3c, 0x62, 0x47, 0x17, 0x54, - 0x1f, 0x55, 0x2f, 0x74, 0xdc, 0x11, 0xe9, 0xa3, 0xb5, 0x75, 0xe9, 0x10, - 0xde, 0x62, 0xa9, 0x24, 0x39, 0xd4, 0x17, 0xbb, 0x15, 0xe4, 0x48, 0x09, - 0x26, 0x6a, 0xbd, 0x3b, 0x10, 0xa1, 0x55, 0xe5, 0x99, 0x53, 0x1e, 0xd2, - 0xee, 0x7c, 0x54, 0xd8, 0x06, 0x8b, 0x1e, 0xe7, 0x3f, 0x08, 0x38, 0x9b, - 0x2e, 0x41, 0xdf, 0x0b, 0x7e, 0x83, 0x7f, 0x04, 0x38, 0xa5, 0x1f, 0x46, - 0x8b, 0x94, 0x28, 0x9f, 0xb8, 0x8c, 0x41, 0xfe, 0x96, 0xe2, 0x24, 0xd1, - 0x97, 0xa4, 0xcb, 0xba, 0xfa, 0x19, 0xc9, 0x57, 0x30, 0x0f, 0x88, 0x58, - 0xa9, 0x67, 0x31, 0x74, 0x51, 0x34, 0x03, 0xbc, 0xff, 0x3b, 0x12, 0x61, - 0x84, 0x63, 0x74, 0xec, 0x4d, 0xda, 0xa3, 0x56, 0xc3, 0xe5, 0x5e, 0x4a, - 0x03, 0x26, 0x88, 0x1a, 0x1d, 0x7f, 0xe8, 0x3f, 0x61, 0x78, 0xb6, 0xc5, - 0x66, 0xb7, 0xb4, 0xc1, 0xe7, 0x82, 0xc1, 0x44, 0xdf, 0xf9, 0x30, 0x30, - 0xe1, 0xd0, 0xf8, 0xf5, 0x40, 0x5a, 0x72, 0x29, 0xef, 0x30, 0xe1, 0x01, - 0xca, 0x1b, 0xb0, 0xa6, 0xa3, 0x17, 0x2b, 0x58, 0x03, 0xda, 0x25, 0x0f, - 0xdc, 0x49, 0x7c, 0xc5, 0x8f, 0x2d, 0x83, 0xca, 0x43, 0x08, 0xc0, 0x36, - 0x70, 0x1e, 0x42, 0xfd, 0xac, 0x4d, 0x31, 0xcf, 0x68, 0x4a, 0xda, 0xd8, - 0xcb, 0xee, 0xaa, 0xfc, 0xcf, 0xcc, 0xe6, 0xb2, 0x77, 0x8b, 0x83, 0x5b, - 0xd5, 0x3d, 0x55, 0xba, 0x03, 0x45, 0xce, 0x51, 0x78, 0x36, 0xcb, 0xcd, - 0x9a, 0x0f, 0x58, 0xbe, 0x15, 0x10, 0xdb, 0x3f, 0x1d, 0x28, 0x27, 0x11, - 0x69, 0xca, 0x95, 0x68, 0xa8, 0xc8, 0xff, 0x0c, 0x3f, 0xd5, 0x11, 0x91, - 0x35, 0x45, 0x35, 0x9d, 0x1c, 0x58, 0xa2, 0xe5, 0xab, 0x83, 0x95, 0x10, - 0x44, 0xd4, 0xc0, 0x27, 0xf4, 0xc2, 0x72, 0x0f, 0x1a, 0x3d, 0x1c, 0xf2, - 0x7f, 0xb9, 0x54, 0xf2, 0x41, 0x24, 0xa8, 0x67, 0x30, 0xa0, 0x57, 0x67, - 0x00, 0xa8, 0x06, 0x60, 0xc3, 0x74, 0x6d, 0x54, 0x90, 0x5e, 0xad, 0x71, - 0x41, 0x50, 0xab, 0x9d, 0xba, 0x34, 0x1a, 0xfd, 0x19, 0x21, 0x0e, 0x87, - 0xb7, 0x22, 0xe6, 0xca, 0xb9, 0x0d, 0x3c, 0x4f, 0xad, 0x16, 0xf1, 0xa5, - 0x6d, 0xba, 0x6d, 0x7b, 0xbe, 0x7b, 0xe3, 0x95, 0xec, 0x1b, 0x8b, 0x6e, - 0xb0, 0xdc, 0x5c, 0xfd, 0x31, 0x73, 0x85, 0x02, 0x63, 0xc6, 0xcc, 0x04, - 0x29, 0xa5, 0xf4, 0x1f, 0xcb, 0x90, 0xf7, 0x83, 0x0d, 0x36, 0xbf, 0x31, - 0xc0, 0xfc, 0x26, 0x15, 0x87, 0xc8, 0x15, 0x88, 0xc9, 0x79, 0x11, 0x67, - 0x23, 0x53, 0xca, 0x03, 0x7a, 0x02, 0xe5, 0xfc, 0xb3, 0x38, 0xf3, 0x5d, - 0xfc, 0x91, 0x6f, 0x59, 0x26, 0xae, 0xd8, 0x45, 0xfa, 0xc4, 0x5b, 0xa2, - 0xfb, 0x2c, 0xc5, 0x36, 0xc6, 0x0d, 0x7b, 0x4e, 0xd2, 0x7f, 0x61, 0xc5, - 0xcc, 0x74, 0xd3, 0x41, 0xd4, 0x8a, 0xaf, 0xcb, 0x32, 0x50, 0xca, 0xeb, - 0x59, 0x0a, 0x05, 0x25, 0xe0, 0x5f, 0x30, 0x2b, 0x5d, 0x9b, 0xf7, 0xe8, - 0x14, 0x14, 0xb5, 0xfe, 0xd5, 0x2f, 0x94, 0x84, 0x5b, 0xc7, 0x4f, 0x82, - 0x01, 0x50, 0xbf, 0x54, 0xe2, 0x7d, 0xeb, 0x0c, 0x85, 0xc8, 0x99, 0x45, - 0x50, 0x8e, 0x4e, 0x10, 0x12, 0x01, 0x17, 0x41, 0xf3, 0x21, 0x4a, 0xee, - 0xaf, 0x0f, 0x76, 0x44, 0xe2, 0x8e, 0xf8, 0x36, 0x25, 0xab, 0x0d, 0x8f, - 0xb1, 0x0a, 0xbf, 0x63, 0x0e, 0xf2, 0x0c, 0x9d, 0x39, 0xa1, 0x98, 0x98, - 0x69, 0x91, 0xd1, 0x9b, 0xe8, 0xcf, 0x16, 0x65, 0x02, 0xc9, 0x67, 0x72, - 0x71, 0x7c, 0xfb, 0x41, 0x2d, 0xe4, 0xd3, 0xfb, 0x44, 0x8a, 0x7a, 0x88, - 0x32, 0x62, 0x26, 0x63, 0xfe, 0x5b, 0x0c, 0x4f, 0x6c, 0xad, 0x2f, 0x64, - 0x6f, 0xc9, 0xda, 0x95, 0x10, 0xbe, 0xd1, 0xfa, 0x8b, 0x67, 0x64, 0x35, - 0x2d, 0xed, 0xca, 0xf3, 0x12, 0xb7, 0x06, 0xc3, 0xa9, 0x8e, 0x3f, 0x09, - 0x4d, 0x1f, 0x50, 0x3a, 0x97, 0xb7, 0xa7, 0xce, 0x4d, 0x46, 0xf1, 0x61, - 0xc1, 0x06, 0x95, 0x0d, 
0x07, 0xa2, 0xbc, 0xed, 0xeb, 0x45, 0xb4, 0x69, - 0x05, 0x7a, 0x30, 0x47, 0xa3, 0xbf, 0x81, 0xa9, 0xa7, 0xf0, 0x53, 0x36, - 0x31, 0x37, 0x13, 0xe5, 0x0e, 0xd6, 0xe6, 0xc7, 0x17, 0x17, 0x21, 0x6d, - 0x36, 0xd0, 0xf6, 0x2a, 0xea, 0x2d, 0x32, 0x0e, 0x90, 0x03, 0x30, 0x4d, - 0x30, 0x31, 0xaa, 0x79, 0x2d, 0xae, 0x2e, 0xb0, 0x13, 0xad, 0x63, 0x69, - 0x67, 0xd8, 0xf3, 0x6e, 0xa4, 0x34, 0xcf, 0x02, 0x10, 0xdd, 0x76, 0xfa, - 0xa7, 0xb0, 0x92, 0xea, 0x47, 0xbd, 0xff, 0xf9, 0xac, 0x8a, 0x1f, 0x31, - 0xf8, 0x05, 0xd4, 0xce, 0x23, 0xad, 0x32, 0x8c, 0x6c, 0x92, 0x85, 0xb9, - 0x74, 0xa6, 0xab, 0x6e, 0x76, 0xfd, 0x3e, 0x8a, 0xac, 0xa3, 0xd1, 0xb7, - 0x40, 0x53, 0x87, 0x28, 0xfc, 0xbc, 0x8a, 0x52, 0x8e, 0x2e, 0x59, 0x2c, - 0x5f, 0x3f, 0xcb, 0xd8, 0xbe, 0x37, 0xfd, 0xdc, 0xc0, 0x34, 0x85, 0x67, - 0x28, 0x9f, 0x1d, 0x05, 0x05, 0x94, 0xed, 0x6f, 0x54, 0x7a, 0x51, 0x9a, - 0xaa, 0xca, 0xe1, 0x41, 0x10, 0xf0, 0x9d, 0x38, 0x9c, 0x5e, 0x95, 0xe3, - 0x7e, 0x62, 0xe2, 0x31, 0x81, 0x28, 0x4a, 0x3c, 0x5e, 0x04, 0x11, 0xe2, - 0x6a, 0x45, 0x6f, 0x68, 0x96, 0x5b, 0xbf, 0x22, 0xd8, 0x29, 0x91, 0x76, - 0xe1, 0xb2, 0x5f, 0xfc, 0x89, 0x90, 0x87, 0xf8, 0xb8, 0x3f, 0xd5, 0x11, - 0xe7, 0x36, 0x47, 0x71, 0xb9, 0x52, 0x97, 0x8e, 0x62, 0x8b, 0x05, 0x31, - 0xe5, 0xd9, 0xa2, 0xc3, 0x1a, 0xb5, 0xda, 0xc7, 0xa5, 0x37, 0x06, 0x67, - 0x41, 0x1f, 0x6e, 0xa3, 0xc2, 0xb4, 0x96, 0x64, 0xfc, 0x46, 0x85, 0x95, - 0x4e, 0xd8, 0x2a, 0x4b, 0xaa, 0x1e, 0xec, 0xd5, 0xed, 0x81, 0x23, 0x68, - 0x0f, 0x5d, 0x0b, 0x95, 0x29, 0xd4, 0x36, 0x4d, 0x8c, 0x32, 0x73, 0x6a, - 0xb7, 0xad, 0xb8, 0x9c, 0xad, 0x76, 0x09, 0xad, 0xb9, 0xea, 0x2d, 0x17, - 0x3c, 0x33, 0x87, 0x7f, 0x62, 0x74, 0x77, 0xc9, 0xd6, 0x3d, 0x17, 0xbc, - 0xff, 0x57, 0x10, 0xec, 0x7a, 0xb7, 0x89, 0x05, 0x26, 0xf1, 0xb2, 0x53, - 0xa1, 0x91, 0xc5, 0x2a, 0xfb, 0x5a, 0xce, 0x5d, 0xd1, 0x6b, 0xbc, 0xb7, - 0x39, 0x09, 0x43, 0xdf, 0x20, 0xd3, 0xc1, 0x74, 0x8d, 0xf4, 0x0b, 0x2a, - 0xc7, 0xe8, 0xa1, 0x5f, 0xb2, 0xfe, 0x1a, 0x96, 0x3a, 0x92, 0xbc, 0x8f, - 0x85, 0xe2, 0x22, 0x73, 0x3f, 0x49, 0xb3, 0x6b, 0x90, 0xbd, 0xcb, 0x3f, - 0x36, 0x6c, 0x3d, 0xe3, 0x00, 0x00, 0x00, 0x00, 0x56, 0xd1, 0xff, 0xff, - 0x04, 0x00, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00, 0x1f, 0x05, 0x81, 0x3f, - 0x25, 0x68, 0xde, 0x72, 0x88, 0x26, 0x66, 0x2d, 0xe4, 0xc8, 0x81, 0xf8, - 0x5d, 0x98, 0xa2, 0xc2, 0x02, 0x62, 0x63, 0x47, 0xe6, 0x61, 0x7f, 0xee, - 0xca, 0x3f, 0x81, 0xd7, 0x1e, 0xa9, 0xbf, 0x66, 0x59, 0x7f, 0xc3, 0x35, - 0x03, 0xae, 0xe5, 0xf2, 0x4d, 0x81, 0x82, 0x78, 0x5e, 0xaf, 0xaa, 0xd1, - 0x27, 0x41, 0x19, 0x93, 0xa8, 0x9b, 0x78, 0x4e, 0x95, 0x89, 0x7f, 0xce, - 0x49, 0xd0, 0x45, 0xb5, 0x7f, 0x1d, 0xe9, 0xee, 0x7f, 0x91, 0xf4, 0x0a, - 0x67, 0x7d, 0x75, 0xff, 0x38, 0x81, 0x27, 0x90, 0x14, 0xa5, 0x99, 0x40, - 0x5b, 0xe6, 0x9a, 0x81, 0x75, 0x22, 0x5f, 0x18, 0x81, 0x34, 0xb7, 0x54, - 0x2e, 0x8d, 0x81, 0x36, 0x0e, 0x5e, 0xc0, 0x5f, 0xd4, 0xc6, 0x34, 0x81, - 0xc8, 0xb9, 0xe2, 0xa9, 0x77, 0x81, 0x44, 0xb4, 0x06, 0x24, 0x81, 0x74, - 0x1c, 0xeb, 0xfb, 0xdd, 0x25, 0x81, 0x14, 0x09, 0x2d, 0xba, 0x11, 0x4b, - 0x07, 0x13, 0xf1, 0xae, 0x81, 0xaf, 0xa3, 0x87, 0x00, 0x00, 0x00, 0x00, - 0xf6, 0xd1, 0xff, 0xff, 0x04, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x00, 0x00, - 0x8a, 0x29, 0x03, 0xe6, 0x24, 0x2a, 0xd6, 0x21, 0xb6, 0xb1, 0x2d, 0x3a, - 0xff, 0xd6, 0x27, 0xd7, 0x18, 0x42, 0xc1, 0xb4, 0xf8, 0xfd, 0xdf, 0x45, - 0x09, 0x91, 0xcb, 0xfe, 0xe9, 0xb5, 0x24, 0xf1, 0xc0, 0x69, 0xd0, 0x64, - 0xa8, 0xeb, 0x12, 0x71, 0xe3, 0xb4, 0xbe, 0xb4, 0x93, 0xbf, 0x8a, 0x8b, - 0xf3, 0x4d, 0x13, 0x3b, 0x6f, 0x6f, 0x32, 0x12, 0x98, 0x95, 0xb9, 0x63, - 0xcd, 0xa5, 0x23, 0xa4, 
0xb8, 0x2e, 0x74, 0x75, 0xbc, 0xe4, 0xc7, 0x46, - 0x96, 0xd4, 0x47, 0xa0, 0x65, 0xec, 0xea, 0xcf, 0xd0, 0xdc, 0xe9, 0x8b, - 0xcc, 0x1d, 0x2f, 0x0d, 0x0a, 0x9c, 0x6e, 0x99, 0x97, 0x97, 0xcc, 0x00, - 0xd2, 0x8e, 0xbc, 0x3c, 0x9a, 0xf1, 0x32, 0x0e, 0xf3, 0xd6, 0x27, 0x1c, - 0xea, 0xab, 0xca, 0x4d, 0x69, 0x32, 0x30, 0x5f, 0x18, 0xd7, 0xb7, 0x4a, - 0xcb, 0x8e, 0xb2, 0x96, 0x39, 0xa3, 0xc7, 0x42, 0xca, 0x60, 0x9b, 0xad, - 0x8e, 0xb7, 0x54, 0x32, 0xea, 0xfd, 0x58, 0xfa, 0xf8, 0x02, 0xef, 0x2f, - 0xec, 0x3c, 0x2a, 0x1a, 0x6a, 0x08, 0xa4, 0x4b, 0xec, 0x30, 0x90, 0xaf, - 0x13, 0x98, 0xcd, 0x48, 0xfd, 0x5f, 0x56, 0x68, 0x17, 0x9e, 0x87, 0xb1, - 0x2b, 0x16, 0xd3, 0x3c, 0xe0, 0xe8, 0x0e, 0xa6, 0xc4, 0x24, 0xd3, 0x05, - 0x75, 0xda, 0x22, 0x44, 0xb5, 0x41, 0xd2, 0xa5, 0x99, 0xf1, 0x5e, 0xbe, - 0x15, 0xb7, 0x33, 0x54, 0x9a, 0x97, 0x5b, 0x35, 0x77, 0x2b, 0x18, 0x46, - 0x2f, 0x92, 0xc5, 0x97, 0x2d, 0x4c, 0xa6, 0xf8, 0x9e, 0xc3, 0xe0, 0x0a, - 0x52, 0xf9, 0x97, 0xc7, 0xd6, 0x36, 0xdd, 0x38, 0xaa, 0xf3, 0x05, 0x30, - 0xc3, 0xe5, 0xaf, 0x54, 0xdc, 0xc4, 0xf2, 0x01, 0x9e, 0xe6, 0xc1, 0x89, - 0xee, 0xd8, 0x5f, 0xfe, 0xf0, 0x70, 0x3c, 0xc4, 0x40, 0xa4, 0xd4, 0xee, - 0xaf, 0x3d, 0xe6, 0xcd, 0x31, 0x16, 0x31, 0x3b, 0xa0, 0x0e, 0xc4, 0x71, - 0xbf, 0xbd, 0x39, 0x89, 0x0f, 0x36, 0xba, 0xd8, 0xa2, 0x49, 0x01, 0xab, - 0xf4, 0x07, 0x99, 0xc7, 0xb1, 0x0c, 0x33, 0x9d, 0x71, 0xf1, 0x15, 0x4b, - 0x60, 0xe0, 0xed, 0x59, 0x0a, 0x34, 0xd9, 0xa2, 0x45, 0x99, 0x4a, 0x60, - 0xd3, 0xdc, 0x37, 0x56, 0x32, 0x4c, 0xea, 0xdc, 0xcf, 0xe6, 0x22, 0x27, - 0x17, 0xea, 0x75, 0x3f, 0x69, 0xd4, 0xcf, 0x53, 0x92, 0x98, 0xf4, 0xfe, - 0x13, 0xa8, 0xe2, 0xb2, 0x48, 0x5f, 0x64, 0xab, 0x2b, 0x61, 0x97, 0xf5, - 0xc5, 0xb6, 0xef, 0x32, 0x4e, 0x47, 0x26, 0x42, 0x48, 0x9c, 0x5b, 0x24, - 0xa3, 0xcb, 0x70, 0xc7, 0x31, 0x6c, 0xc8, 0x4d, 0x5c, 0x02, 0xca, 0x71, - 0x1e, 0x56, 0xdb, 0x27, 0x66, 0x5d, 0x4f, 0x0b, 0x09, 0x57, 0xbe, 0x72, - 0x17, 0x3b, 0xce, 0xdd, 0xd2, 0x20, 0x13, 0x67, 0x32, 0x04, 0xee, 0xc4, - 0x66, 0x23, 0x0e, 0x97, 0x5e, 0x21, 0x30, 0xb2, 0xe4, 0x16, 0x06, 0x57, - 0xc3, 0x9b, 0x29, 0x5b, 0x76, 0xd0, 0x36, 0xac, 0xe6, 0xa2, 0x91, 0x57, - 0x96, 0x4e, 0x1c, 0x6f, 0x4a, 0x03, 0x50, 0x55, 0x6d, 0xaf, 0x9a, 0x29, - 0xc9, 0x61, 0x6c, 0x18, 0x4c, 0xb9, 0xd5, 0x41, 0xf8, 0x75, 0x2b, 0xc3, - 0x0e, 0x69, 0x9f, 0x45, 0x93, 0x2f, 0xa6, 0xf9, 0x30, 0x65, 0x05, 0x13, - 0xe3, 0x00, 0x54, 0x0e, 0xa4, 0xb5, 0x89, 0x6d, 0x4d, 0x11, 0x3d, 0x2a, - 0x29, 0x99, 0xd9, 0xdf, 0x75, 0xce, 0x01, 0x21, 0xbc, 0x26, 0xb3, 0x22, - 0xf9, 0xb0, 0x45, 0x5c, 0xf8, 0xea, 0xb2, 0x08, 0x1a, 0xf7, 0xa0, 0x70, - 0x65, 0xa8, 0xab, 0xe1, 0x92, 0xcc, 0xcc, 0x1f, 0x0e, 0x36, 0x60, 0xb7, - 0xea, 0xcb, 0x3d, 0xf6, 0x98, 0xbf, 0xcd, 0x00, 0xc9, 0x16, 0x1e, 0xdb, - 0x58, 0x24, 0xb1, 0xd8, 0xaf, 0x01, 0x00, 0xfa, 0x15, 0xf4, 0x37, 0x05, - 0xd7, 0x17, 0x2a, 0xd2, 0xe8, 0xe4, 0x0c, 0x50, 0xfa, 0xe8, 0xd6, 0x99, - 0xa9, 0x58, 0x61, 0x38, 0xee, 0x22, 0x3c, 0x53, 0xcf, 0x64, 0x8e, 0xad, - 0x4d, 0xd6, 0xc3, 0xc3, 0xdd, 0xb0, 0xb3, 0xf7, 0xdd, 0x37, 0xfd, 0xf3, - 0x2b, 0x6a, 0xe2, 0xd4, 0xfc, 0x0c, 0x74, 0xca, 0x37, 0x2f, 0xd2, 0xf8, - 0x5b, 0xf1, 0x8c, 0x32, 0xa0, 0xdc, 0x2c, 0xa8, 0x36, 0x2f, 0xbe, 0x45, - 0x9b, 0x42, 0x95, 0x15, 0x5e, 0x08, 0xb1, 0x61, 0xec, 0xa2, 0xdf, 0x5f, - 0xca, 0xf8, 0x62, 0x73, 0xfd, 0x66, 0xc8, 0x51, 0x2a, 0x69, 0x3c, 0x8f, - 0x75, 0xa4, 0x6f, 0xbe, 0xc1, 0x5c, 0x66, 0xe2, 0x60, 0x92, 0xd7, 0x0e, - 0xee, 0x1b, 0xc7, 0x39, 0x8b, 0x56, 0x6c, 0xc6, 0x20, 0xfa, 0xec, 0x96, - 0xa5, 0x0f, 0x74, 0x42, 0x32, 0x12, 0x11, 0xdf, 0x02, 0xfe, 0x42, 0x1c, - 0xfe, 0xf1, 0x72, 0xaf, 
[... hex byte data elided: this part of the hunk is a long run of removed lines holding an auto-generated byte array (a serialized model blob; the readable strings "MLIR Converted.", "main", "conv2d_input", and "Identity" appear near its end) ...]
0x01, 0x00, 0x00, 0x00, 0x13, 0xc2, 0x47, 0x3b, - 0x01, 0x00, 0x00, 0x00, 0x8d, 0xf4, 0xad, 0x3e, 0x01, 0x00, 0x00, 0x00, - 0x15, 0x00, 0xe0, 0xbe, 0x0d, 0x00, 0x00, 0x00, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x38, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, - 0xb0, 0xf3, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0x7c, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x80, 0x04, 0x00, 0x00, 0x94, 0xf3, 0xff, 0xff, 0x30, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x6c, 0x02, 0xa5, 0x3a, - 0x01, 0x00, 0x00, 0x00, 0x6a, 0x5d, 0xa4, 0x3e, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x66, 0x6c, 0x61, 0x74, 0x74, - 0x65, 0x6e, 0x2f, 0x52, 0x65, 0x73, 0x68, 0x61, 0x70, 0x65, 0x00, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x80, 0x04, 0x00, 0x00, - 0x40, 0xf4, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0x8c, 0x00, 0x00, 0x00, - 0x0b, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x2c, 0xf4, 0xff, 0xff, 0x30, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x6c, 0x02, 0xa5, 0x3a, 0x01, 0x00, 0x00, 0x00, - 0x6a, 0x5d, 0xa4, 0x3e, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x2f, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x69, - 0x6e, 0x67, 0x32, 0x64, 0x2f, 0x4d, 0x61, 0x78, 0x50, 0x6f, 0x6f, 0x6c, - 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0xe8, 0xf4, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0xec, 0x00, 0x00, 0x00, - 0x0a, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x0c, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0xd4, 0xf4, 0xff, 0xff, 0x30, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x6c, 0x02, 0xa5, 0x3a, 0x01, 0x00, 0x00, 0x00, - 0x6a, 0x5d, 0xa4, 0x3e, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x83, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x5f, 0x31, 0x2f, - 0x52, 0x65, 0x6c, 0x75, 0x3b, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x5f, 0x31, - 0x2f, 0x42, 0x69, 0x61, 0x73, 0x41, 0x64, 0x64, 0x3b, 0x73, 0x65, 0x71, - 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, - 0x32, 0x64, 0x5f, 0x31, 0x2f, 0x43, 0x6f, 0x6e, 0x76, 0x32, 0x44, 0x3b, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, - 0x6f, 0x6e, 0x76, 0x32, 
0x64, 0x5f, 0x31, 0x2f, 0x42, 0x69, 0x61, 0x73, - 0x41, 0x64, 0x64, 0x2f, 0x52, 0x65, 0x61, 0x64, 0x56, 0x61, 0x72, 0x69, - 0x61, 0x62, 0x6c, 0x65, 0x4f, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x0c, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0xf0, 0xf5, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0xe4, 0x00, 0x00, 0x00, - 0x09, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, - 0x0e, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xdc, 0xf5, 0xff, 0xff, 0x30, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, - 0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x15, 0xa1, 0x10, 0x3b, 0x01, 0x00, 0x00, 0x00, - 0x74, 0x10, 0x10, 0x3f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x7b, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x2f, 0x52, 0x65, - 0x6c, 0x75, 0x3b, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x2f, 0x42, 0x69, 0x61, - 0x73, 0x41, 0x64, 0x64, 0x3b, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x2f, 0x43, - 0x6f, 0x6e, 0x76, 0x32, 0x44, 0x3b, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x2f, - 0x42, 0x69, 0x61, 0x73, 0x41, 0x64, 0x64, 0x2f, 0x52, 0x65, 0x61, 0x64, - 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4f, 0x70, 0x2f, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x3a, 0xf8, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, - 0xd4, 0x02, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0xac, 0x02, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0xc4, 0xf6, 0xff, 0xff, 0x1c, 0x02, 0x00, 0x00, - 0x94, 0x01, 0x00, 0x00, 0x0c, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0xb9, 0x37, 0x74, 0x3a, 0x8b, 0xfe, 0x77, 0x3a, 0x54, 0xc7, 0x75, 0x3a, - 0xc4, 0x11, 0x78, 0x3a, 0xb9, 0x90, 0x74, 0x3a, 0x3b, 0x97, 0x7b, 0x3a, - 0xe8, 0x57, 0x75, 0x3a, 0x0c, 0x0e, 0x74, 0x3a, 0x76, 0x8b, 0x79, 0x3a, - 0x2b, 0x7b, 0x6d, 0x3a, 0x17, 0xad, 0x71, 0x3a, 0xe4, 0x9b, 0x77, 0x3a, - 0x0b, 0xab, 0x7a, 0x3a, 0x9e, 0x12, 0x75, 0x3a, 0x8c, 0xcf, 0x79, 0x3a, - 0xa0, 0x5a, 0x79, 0x3a, 0x74, 0xc3, 0x78, 0x3a, 0x0e, 0xa9, 0x74, 0x3a, - 0x6b, 0xf8, 0x6f, 0x3a, 0x53, 0xeb, 0x72, 0x3a, 0xff, 0xe2, 0x73, 0x3a, - 0x3b, 0x38, 0x78, 0x3a, 0xed, 0x9e, 0x76, 0x3a, 0x77, 0xbc, 0x6d, 0x3a, - 0x4f, 0xf5, 0x71, 0x3a, 0x17, 0xc9, 0x74, 0x3a, 0x87, 0x84, 0x6b, 0x3a, - 0x4b, 0xc5, 0x78, 0x3a, 0xdd, 0x02, 0x75, 0x3a, 0x0e, 0xcf, 0x78, 0x3a, - 0x14, 0x40, 0x75, 0x3a, 0x2e, 0xca, 0x72, 0x3a, 0x20, 0x00, 0x00, 0x00, - 0x95, 0x2f, 0xef, 0x3d, 0x47, 0x1c, 0xf0, 0x3d, 0xc5, 0xdb, 0xf3, 0x3d, - 0x2e, 0x57, 0xe7, 0x3d, 0x98, 0xa7, 0xf2, 0x3d, 0x98, 0x89, 0xe4, 0x3d, - 0x38, 0x6d, 0xf3, 0x3d, 0x3f, 0x38, 0xe2, 0x3d, 0x91, 0x6f, 0xf0, 0x3d, - 0x35, 0xa0, 0xeb, 0x3d, 0x42, 0x3d, 0xeb, 0x3d, 0xed, 0x89, 0xe7, 0x3d, - 0xb5, 0xb5, 0xf8, 0x3d, 0x79, 0x28, 0xf3, 0x3d, 0xed, 0xdb, 0xf7, 0x3d, - 0xeb, 0x67, 0xf7, 0x3d, 0xed, 0xd1, 0xf6, 0x3d, 0xbc, 0xbf, 0xf2, 0x3d, - 0x7a, 0x18, 0xee, 0x3d, 0x7c, 0x05, 0xf1, 0x3d, 0x63, 0x69, 0xe8, 0x3d, - 0xbb, 0xc0, 0xf1, 0x3d, 0xaf, 0xb1, 0xf4, 0x3d, 0xfe, 0xe0, 0xeb, 0x3d, - 0xb6, 0x60, 0xec, 0x3d, 0x8c, 0x32, 0xf0, 0x3d, 0x7e, 0xad, 0xe9, 0x3d, - 0xc0, 0xd3, 0xf6, 0x3d, 0xd7, 0x18, 0xf3, 0x3d, 0x40, 0x53, 0xf0, 0x3d, - 0x2c, 0xdc, 0xf1, 0x3d, 0x9a, 0xe4, 0xf0, 0x3d, 0x20, 0x00, 0x00, 0x00, - 0x4a, 0x4f, 0xf2, 0xbd, 0x8e, 0x0e, 0xf6, 0xbd, 0x74, 0x46, 0xec, 0xbd, - 0xa0, 0x21, 0xf6, 0xbd, 0x8e, 0x27, 0xf0, 0xbd, 0x0d, 0xa0, 0xf9, 0xbd, - 0x0c, 0x97, 0xec, 0xbd, 0xf0, 0x25, 0xf2, 0xbd, 0x5f, 0x98, 0xf7, 0xbd, - 0x27, 0x8d, 0xe8, 0xbd, 0xbd, 0xc9, 0xef, 0xbd, 0xac, 0xac, 0xf5, 0xbd, - 0x5a, 0x94, 0xed, 0xbd, 0x5a, 0x64, 0xf1, 0xbd, 0x2a, 0xa7, 0xe9, 0xbd, - 0x3c, 0x93, 0xf3, 0xbd, 0xf8, 0x2b, 0xf3, 0xbd, 0xf6, 0x35, 0xed, 0xbd, - 0x94, 0xf4, 0xed, 0xbd, 0x70, 0x94, 0xe9, 0xbd, 0x39, 0xfb, 0xf1, 0xbd, - 0xcb, 0x47, 0xf6, 0xbd, 0x88, 0xb9, 0xe7, 0xbd, 0x49, 0x62, 0xe9, 0xbd, - 0x64, 0x11, 0xf0, 0xbd, 0x85, 0xdf, 0xf2, 0xbd, 0x5c, 0x61, 0xe8, 0xbd, - 0x22, 0x46, 0xf3, 0xbd, 0x5a, 0x8e, 0xf0, 0xbd, 0x70, 0xdd, 0xf6, 0xbd, - 0x94, 0x55, 0xf3, 0xbd, 0x57, 0xba, 0xf0, 0xbd, 0x1a, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, - 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x5f, 0x31, 0x2f, 0x43, 0x6f, 0x6e, 0x76, - 0x32, 0x44, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x2a, 0xfb, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0x94, 0x01, 0x00, 0x00, - 0x07, 0x00, 0x00, 0x00, 0x6c, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xb4, 0xf9, 0xff, 0xff, 0x1c, 0x01, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00, - 0x8c, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xe6, 0x69, 0xc5, 0x3a, 0xa0, 0x8d, 0xa8, 0x3a, 0xfe, 0x5c, 0xc1, 0x3a, - 0x84, 0x01, 0xcb, 0x3a, 0xa2, 0xc2, 0xb5, 0x3a, 0x42, 0x01, 0xd1, 0x3a, - 0xd7, 0x01, 0xcc, 0x3a, 0x20, 0xd8, 0xc7, 0x3a, 0x28, 0x80, 0xa4, 0x3a, - 0xd9, 0x25, 0xbe, 0x3a, 0x39, 0x6f, 0xc4, 0x3a, 0x59, 0x6c, 0xcb, 0x3a, - 0xb8, 0x0a, 0xc2, 0x3a, 0x73, 0x3f, 0xca, 0x3a, 0xb9, 0xed, 0xc5, 0x3a, - 0xe9, 0x9f, 0xc1, 0x3a, 0x10, 0x00, 0x00, 0x00, 0x5b, 0x2e, 0x2f, 0x3e, - 0x3e, 0xd9, 0x06, 0x3e, 0x44, 0xda, 0x3f, 0x3e, 0xd3, 0x09, 0x22, 0x3e, - 0x1d, 0x57, 0x34, 0x3e, 0xa4, 0xb6, 0x44, 0x3e, 0xd3, 0x69, 0x4a, 0x3e, - 0x70, 0x48, 0x46, 0x3e, 0x28, 0x37, 0x23, 0x3e, 0xe6, 0xdb, 0x06, 0x3e, - 0x3c, 0x1d, 0x34, 0x3e, 0x36, 0xba, 0x16, 0x3e, 0x24, 0xa4, 0x34, 0x3e, - 0xf4, 0xfb, 0x37, 0x3e, 0xd6, 0x7b, 0x8a, 0x3d, 0x00, 0x85, 0xe3, 0x3d, - 0x10, 0x00, 0x00, 0x00, 0x12, 0xdf, 0x43, 0xbe, 0x85, 0x3c, 0x27, 0xbe, - 0x54, 0xcd, 0x0d, 0xbe, 0x81, 0x6b, 0x49, 0xbe, 0x33, 0xb1, 0xe7, 0xbd, - 0x3f, 0x5f, 0x4f, 0xbe, 0xa1, 0x63, 0x3e, 0xbe, 0xbb, 0xa7, 0xea, 0xbd, - 0x2d, 0x8c, 0x0e, 0xbe, 0x8d, 0xa9, 0x3c, 0xbe, 0x5b, 0xe6, 0x42, 0xbe, - 0x80, 0xd5, 0x49, 0xbe, 0xa3, 0x86, 0x40, 0xbe, 0xf4, 0xaa, 0x48, 0xbe, - 0xde, 0x61, 0x44, 0xbe, 0xa9, 0x1c, 0x40, 0xbe, 0x18, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, - 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x2f, 0x43, 0x6f, 0x6e, 0x76, 0x32, 0x44, - 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0xda, 0xfc, 0xff, 0xff, 0x00, 0x00, 0x00, 0x09, 0x64, 0x00, 0x00, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x64, 0xfb, 0xff, 0xff, 0x2c, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x07, 0x72, 0x1e, 0x3a, 0x01, 0x00, 0x00, 0x00, 0x32, 0xe2, 0x9b, 0x3d, - 0x01, 0x00, 0x00, 0x00, 0x23, 0x35, 0x9d, 0xbd, 0x17, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x64, - 0x65, 0x6e, 0x73, 0x65, 0x2f, 0x4d, 0x61, 0x74, 0x4d, 0x75, 0x6c, 0x00, - 0x02, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x80, 0x04, 0x00, 0x00, - 0x52, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x02, 0x38, 0x00, 0x00, 0x00, - 0x05, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x66, - 0x6c, 0x61, 0x74, 0x74, 0x65, 0x6e, 0x2f, 0x43, 0x6f, 0x6e, 0x73, 0x74, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x9a, 0xfd, 0xff, 0xff, 0x00, 0x00, 0x00, 0x02, 0x68, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x8c, 0xfd, 0xff, 0xff, 0x14, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0xfc, 0x41, 0x4c, 0x35, 0x30, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x64, - 0x65, 0x6e, 0x73, 0x65, 0x2f, 0x42, 0x69, 0x61, 0x73, 0x41, 0x64, 0x64, - 0x2f, 0x52, 0x65, 0x61, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, - 0x65, 0x4f, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, - 0x12, 0xfe, 0xff, 0xff, 0x00, 0x00, 0x00, 0x02, 0xdc, 0x01, 0x00, 0x00, - 0x03, 0x00, 0x00, 0x00, 0x9c, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x04, 0xfe, 0xff, 0xff, 0x0c, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x03, 0xf9, 0x09, 0x36, 0x3a, 0x1b, 0x0c, 0x36, 0xc6, 0xda, 0x0a, 0x36, - 0x16, 0x26, 0x0c, 0x36, 0x4b, 0x2b, 0x0a, 0x36, 0x60, 0x23, 0x0e, 0x36, - 0xd3, 0x9b, 0x0a, 0x36, 0x78, 0xe1, 0x09, 0x36, 0x78, 0xfb, 0x0c, 0x36, - 0xb6, 0x2a, 0x06, 0x36, 0x6f, 0x89, 0x08, 0x36, 0x7e, 0xe3, 0x0b, 0x36, - 0xf0, 0x9d, 0x0d, 0x36, 0xae, 0x74, 0x0a, 0x36, 0xef, 0x21, 0x0d, 0x36, - 0xe0, 0xdf, 0x0c, 0x36, 0x79, 0x8a, 0x0c, 0x36, 0x0a, 0x39, 0x0a, 0x36, - 0xbb, 0x92, 0x07, 0x36, 0x39, 0x3d, 0x09, 0x36, 0x25, 0xc9, 0x09, 0x36, - 0xd1, 0x3b, 0x0c, 0x36, 0x93, 0x54, 0x0b, 0x36, 0x9a, 0x4f, 0x06, 0x36, - 0x3c, 0xb2, 0x08, 0x36, 0x23, 0x4b, 0x0a, 0x36, 0xbe, 0x0e, 0x05, 0x36, - 0x83, 0x8b, 0x0c, 0x36, 0xc7, 0x6b, 0x0a, 0x36, 0x07, 0x91, 0x0c, 0x36, - 0x5d, 0x8e, 0x0a, 0x36, 0x7f, 0x2a, 0x09, 0x36, 0x33, 0x00, 0x00, 0x00, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, - 0x6f, 0x6e, 0x76, 0x32, 0x64, 0x5f, 0x31, 0x2f, 0x42, 0x69, 0x61, 0x73, - 0x41, 0x64, 0x64, 0x2f, 0x52, 0x65, 0x61, 0x64, 0x56, 0x61, 0x72, 0x69, - 0x61, 0x62, 0x6c, 0x65, 0x4f, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x00, 0x01, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x0e, 0x00, 
0x18, 0x00, 0x08, 0x00, 0x07, 0x00, 0x0c, 0x00, - 0x10, 0x00, 0x14, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x2c, 0x01, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0xec, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x08, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0xe1, 0x22, 0xc6, 0x36, 0x90, 0x2b, 0xa9, 0x36, 0x2d, 0x12, 0xc2, 0x36, - 0xbc, 0xbf, 0xcb, 0x36, 0xf2, 0x6c, 0xb6, 0x36, 0x19, 0xc5, 0xd1, 0x36, - 0xff, 0xc0, 0xcc, 0x36, 0x62, 0x93, 0xc8, 0x36, 0x4c, 0x1a, 0xa5, 0x36, - 0x05, 0xd8, 0xbe, 0x36, 0x49, 0x27, 0xc5, 0x36, 0xf5, 0x2a, 0xcc, 0x36, - 0x8a, 0xc0, 0xc2, 0x36, 0xf5, 0xfc, 0xca, 0x36, 0x2f, 0xa7, 0xc6, 0x36, - 0x57, 0x55, 0xc2, 0x36, 0x31, 0x00, 0x00, 0x00, 0x73, 0x65, 0x71, 0x75, - 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x32, - 0x64, 0x2f, 0x42, 0x69, 0x61, 0x73, 0x41, 0x64, 0x64, 0x2f, 0x52, 0x65, - 0x61, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4f, 0x70, - 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x14, 0x00, 0x1c, 0x00, - 0x08, 0x00, 0x07, 0x00, 0x0c, 0x00, 0x10, 0x00, 0x14, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x18, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x68, 0x00, 0x00, 0x00, - 0x28, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xff, 0xff, 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x14, 0x00, 0x04, 0x00, 0x08, 0x00, - 0x0c, 0x00, 0x10, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, - 0x24, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xf0, 0x77, 0x80, 0x3b, - 0x01, 0x00, 0x00, 0x00, 0xf0, 0xee, 0x7f, 0x3f, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x63, 0x6f, 0x6e, 0x76, - 0x32, 0x64, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x69, 0x6e, 0x74, - 0x38, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x10, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00, 0x00, 0x54, 0x00, 0x00, 0x00, - 0x40, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0xca, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x06, - 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x08, 0x00, 0x07, 0x00, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0xe6, 0xff, 0xff, 0xff, - 0x00, 0x00, 0x00, 0x09, 
0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, - 0x06, 0x00, 0x05, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x16, 0x0a, 0x00, - 0x0e, 0x00, 0x07, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0a, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x11, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, - 0x0c, 0x00, 0x07, 0x00, 0x00, 0x00, 0x08, 0x00, 0x0a, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00}; - -const unsigned int kTestConvModelDataSize = 21344; diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/testing/test_conv_model.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/testing/test_conv_model.h deleted file mode 100644 index 2103196e..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/micro/testing/test_conv_model.h +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ -#define TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ - -// See generate_test_models.py for updating the contents of this model: -extern const unsigned char kTestConvModelData[]; -extern const unsigned int kTestConvModelDataSize; - -#endif // TENSORFLOW_LITE_MICRO_TESTING_TEST_CONV_MODEL_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/portable_type_to_tflitetype.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/portable_type_to_tflitetype.h deleted file mode 100644 index 9bfaf6b7..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/portable_type_to_tflitetype.h +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_PORTABLE_TYPE_TO_TFLITETYPE_H_ -#define TENSORFLOW_LITE_PORTABLE_TYPE_TO_TFLITETYPE_H_ - -// Most of the definitions have been moved to this subheader so that Micro -// can include it without relying on <string> and <vector>, which isn't -// available on all platforms. - -// Arduino build defines abs as a macro here. That is invalid C++, and breaks -// libc++'s <complex> header, undefine it. -#ifdef abs -#undef abs -#endif - -#include "tensorflow/lite/c/common.h" - -namespace tflite { - -// Map statically from a C++ type to a TfLiteType.
Used in interpreter for -// safe casts. -// Example: -// typeToTfLiteType<bool>() -> kTfLiteBool -template <typename T> -constexpr TfLiteType typeToTfLiteType() { - return kTfLiteNoType; -} -// Map from TfLiteType to the corresponding C++ type. -// Example: -// TfLiteTypeToType<kTfLiteBool>::Type -> bool -template <TfLiteType TFLITE_TYPE_ENUM> -struct TfLiteTypeToType {}; // Specializations below - -// Template specialization for both typeToTfLiteType and TfLiteTypeToType. -#define MATCH_TYPE_AND_TFLITE_TYPE(CPP_TYPE, TFLITE_TYPE_ENUM) \ - template <> \ - constexpr TfLiteType typeToTfLiteType<CPP_TYPE>() { \ - return TFLITE_TYPE_ENUM; \ - } \ - template <> \ - struct TfLiteTypeToType<TFLITE_TYPE_ENUM> { \ - using Type = CPP_TYPE; \ - } - -// No string mapping is included here, since the TF Lite packed representation -// doesn't correspond to a C++ type well. -MATCH_TYPE_AND_TFLITE_TYPE(int32_t, kTfLiteInt32); -MATCH_TYPE_AND_TFLITE_TYPE(int16_t, kTfLiteInt16); -MATCH_TYPE_AND_TFLITE_TYPE(int64_t, kTfLiteInt64); -MATCH_TYPE_AND_TFLITE_TYPE(float, kTfLiteFloat32); -MATCH_TYPE_AND_TFLITE_TYPE(unsigned char, kTfLiteUInt8); -MATCH_TYPE_AND_TFLITE_TYPE(int8_t, kTfLiteInt8); -MATCH_TYPE_AND_TFLITE_TYPE(bool, kTfLiteBool); -MATCH_TYPE_AND_TFLITE_TYPE(TfLiteFloat16, kTfLiteFloat16); -MATCH_TYPE_AND_TFLITE_TYPE(double, kTfLiteFloat64); -MATCH_TYPE_AND_TFLITE_TYPE(uint64_t, kTfLiteUInt64); - -} // namespace tflite -#endif // TENSORFLOW_LITE_PORTABLE_TYPE_TO_TFLITETYPE_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/schema/schema_generated.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/schema/schema_generated.h deleted file mode 100644 index 16c4ee19..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/schema/schema_generated.h +++ /dev/null @@ -1,17147 +0,0 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-==============================================================================*/ -// automatically generated by the FlatBuffers compiler, do not modify - - -#ifndef FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_ -#define FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_ - -#include "flatbuffers/flatbuffers.h" - -namespace tflite { - -struct CustomQuantization; -struct CustomQuantizationT; - -struct QuantizationParameters; -struct QuantizationParametersT; - -struct Int32Vector; -struct Int32VectorT; - -struct Uint16Vector; -struct Uint16VectorT; - -struct Uint8Vector; -struct Uint8VectorT; - -struct DimensionMetadata; -struct DimensionMetadataT; - -struct SparsityParameters; -struct SparsityParametersT; - -struct Tensor; -struct TensorT; - -struct Conv2DOptions; -struct Conv2DOptionsT; - -struct Pool2DOptions; -struct Pool2DOptionsT; - -struct DepthwiseConv2DOptions; -struct DepthwiseConv2DOptionsT; - -struct ConcatEmbeddingsOptions; -struct ConcatEmbeddingsOptionsT; - -struct LSHProjectionOptions; -struct LSHProjectionOptionsT; - -struct SVDFOptions; -struct SVDFOptionsT; - -struct RNNOptions; -struct RNNOptionsT; - -struct SequenceRNNOptions; -struct SequenceRNNOptionsT; - -struct BidirectionalSequenceRNNOptions; -struct BidirectionalSequenceRNNOptionsT; - -struct FullyConnectedOptions; -struct FullyConnectedOptionsT; - -struct SoftmaxOptions; -struct SoftmaxOptionsT; - -struct ConcatenationOptions; -struct ConcatenationOptionsT; - -struct AddOptions; -struct AddOptionsT; - -struct MulOptions; -struct MulOptionsT; - -struct L2NormOptions; -struct L2NormOptionsT; - -struct LocalResponseNormalizationOptions; -struct LocalResponseNormalizationOptionsT; - -struct LSTMOptions; -struct LSTMOptionsT; - -struct UnidirectionalSequenceLSTMOptions; -struct UnidirectionalSequenceLSTMOptionsT; - -struct BidirectionalSequenceLSTMOptions; -struct BidirectionalSequenceLSTMOptionsT; - -struct ResizeBilinearOptions; -struct ResizeBilinearOptionsT; - -struct ResizeNearestNeighborOptions; -struct ResizeNearestNeighborOptionsT; - -struct CallOptions; -struct CallOptionsT; - -struct PadOptions; -struct PadOptionsT; - -struct PadV2Options; -struct PadV2OptionsT; - -struct ReshapeOptions; -struct ReshapeOptionsT; - -struct SpaceToBatchNDOptions; -struct SpaceToBatchNDOptionsT; - -struct BatchToSpaceNDOptions; -struct BatchToSpaceNDOptionsT; - -struct SkipGramOptions; -struct SkipGramOptionsT; - -struct SpaceToDepthOptions; -struct SpaceToDepthOptionsT; - -struct DepthToSpaceOptions; -struct DepthToSpaceOptionsT; - -struct SubOptions; -struct SubOptionsT; - -struct DivOptions; -struct DivOptionsT; - -struct TopKV2Options; -struct TopKV2OptionsT; - -struct EmbeddingLookupSparseOptions; -struct EmbeddingLookupSparseOptionsT; - -struct GatherOptions; -struct GatherOptionsT; - -struct TransposeOptions; -struct TransposeOptionsT; - -struct ExpOptions; -struct ExpOptionsT; - -struct CosOptions; -struct CosOptionsT; - -struct ReducerOptions; -struct ReducerOptionsT; - -struct SqueezeOptions; -struct SqueezeOptionsT; - -struct SplitOptions; -struct SplitOptionsT; - -struct SplitVOptions; -struct SplitVOptionsT; - -struct StridedSliceOptions; -struct StridedSliceOptionsT; - -struct LogSoftmaxOptions; -struct LogSoftmaxOptionsT; - -struct CastOptions; -struct CastOptionsT; - -struct DequantizeOptions; -struct DequantizeOptionsT; - -struct MaximumMinimumOptions; -struct MaximumMinimumOptionsT; - -struct TileOptions; -struct TileOptionsT; - -struct ArgMaxOptions; -struct ArgMaxOptionsT; - -struct ArgMinOptions; -struct ArgMinOptionsT; - 
-struct GreaterOptions; -struct GreaterOptionsT; - -struct GreaterEqualOptions; -struct GreaterEqualOptionsT; - -struct LessOptions; -struct LessOptionsT; - -struct LessEqualOptions; -struct LessEqualOptionsT; - -struct NegOptions; -struct NegOptionsT; - -struct SelectOptions; -struct SelectOptionsT; - -struct SliceOptions; -struct SliceOptionsT; - -struct TransposeConvOptions; -struct TransposeConvOptionsT; - -struct ExpandDimsOptions; -struct ExpandDimsOptionsT; - -struct SparseToDenseOptions; -struct SparseToDenseOptionsT; - -struct EqualOptions; -struct EqualOptionsT; - -struct NotEqualOptions; -struct NotEqualOptionsT; - -struct ShapeOptions; -struct ShapeOptionsT; - -struct RankOptions; -struct RankOptionsT; - -struct PowOptions; -struct PowOptionsT; - -struct FakeQuantOptions; -struct FakeQuantOptionsT; - -struct PackOptions; -struct PackOptionsT; - -struct LogicalOrOptions; -struct LogicalOrOptionsT; - -struct OneHotOptions; -struct OneHotOptionsT; - -struct AbsOptions; -struct AbsOptionsT; - -struct HardSwishOptions; -struct HardSwishOptionsT; - -struct LogicalAndOptions; -struct LogicalAndOptionsT; - -struct LogicalNotOptions; -struct LogicalNotOptionsT; - -struct UnpackOptions; -struct UnpackOptionsT; - -struct FloorDivOptions; -struct FloorDivOptionsT; - -struct SquareOptions; -struct SquareOptionsT; - -struct ZerosLikeOptions; -struct ZerosLikeOptionsT; - -struct FillOptions; -struct FillOptionsT; - -struct FloorModOptions; -struct FloorModOptionsT; - -struct RangeOptions; -struct RangeOptionsT; - -struct LeakyReluOptions; -struct LeakyReluOptionsT; - -struct SquaredDifferenceOptions; -struct SquaredDifferenceOptionsT; - -struct MirrorPadOptions; -struct MirrorPadOptionsT; - -struct UniqueOptions; -struct UniqueOptionsT; - -struct ReverseV2Options; -struct ReverseV2OptionsT; - -struct AddNOptions; -struct AddNOptionsT; - -struct GatherNdOptions; -struct GatherNdOptionsT; - -struct WhereOptions; -struct WhereOptionsT; - -struct ReverseSequenceOptions; -struct ReverseSequenceOptionsT; - -struct MatrixDiagOptions; -struct MatrixDiagOptionsT; - -struct QuantizeOptions; -struct QuantizeOptionsT; - -struct MatrixSetDiagOptions; -struct MatrixSetDiagOptionsT; - -struct IfOptions; -struct IfOptionsT; - -struct CallOnceOptions; -struct CallOnceOptionsT; - -struct WhileOptions; -struct WhileOptionsT; - -struct NonMaxSuppressionV4Options; -struct NonMaxSuppressionV4OptionsT; - -struct NonMaxSuppressionV5Options; -struct NonMaxSuppressionV5OptionsT; - -struct ScatterNdOptions; -struct ScatterNdOptionsT; - -struct SelectV2Options; -struct SelectV2OptionsT; - -struct DensifyOptions; -struct DensifyOptionsT; - -struct SegmentSumOptions; -struct SegmentSumOptionsT; - -struct BatchMatMulOptions; -struct BatchMatMulOptionsT; - -struct CumsumOptions; -struct CumsumOptionsT; - -struct BroadcastToOptions; -struct BroadcastToOptionsT; - -struct Rfft2dOptions; -struct Rfft2dOptionsT; - -struct OperatorCode; -struct OperatorCodeT; - -struct Operator; -struct OperatorT; - -struct SubGraph; -struct SubGraphT; - -struct Buffer; -struct BufferT; - -struct Metadata; -struct MetadataT; - -struct TensorMap; -struct TensorMapT; - -struct SignatureDef; -struct SignatureDefT; - -struct Model; -struct ModelT; - -enum TensorType { - TensorType_FLOAT32 = 0, - TensorType_FLOAT16 = 1, - TensorType_INT32 = 2, - TensorType_UINT8 = 3, - TensorType_INT64 = 4, - TensorType_STRING = 5, - TensorType_BOOL = 6, - TensorType_INT16 = 7, - TensorType_COMPLEX64 = 8, - TensorType_INT8 = 9, - TensorType_FLOAT64 = 10, - 
TensorType_COMPLEX128 = 11, - TensorType_UINT64 = 12, - TensorType_MIN = TensorType_FLOAT32, - TensorType_MAX = TensorType_UINT64 -}; - -inline const TensorType (&EnumValuesTensorType())[13] { - static const TensorType values[] = { - TensorType_FLOAT32, - TensorType_FLOAT16, - TensorType_INT32, - TensorType_UINT8, - TensorType_INT64, - TensorType_STRING, - TensorType_BOOL, - TensorType_INT16, - TensorType_COMPLEX64, - TensorType_INT8, - TensorType_FLOAT64, - TensorType_COMPLEX128, - TensorType_UINT64 - }; - return values; -} - -inline const char * const *EnumNamesTensorType() { - static const char * const names[14] = { - "FLOAT32", - "FLOAT16", - "INT32", - "UINT8", - "INT64", - "STRING", - "BOOL", - "INT16", - "COMPLEX64", - "INT8", - "FLOAT64", - "COMPLEX128", - "UINT64", - nullptr - }; - return names; -} - -inline const char *EnumNameTensorType(TensorType e) { - if (flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_UINT64)) return ""; - const size_t index = static_cast(e); - return EnumNamesTensorType()[index]; -} - -enum QuantizationDetails { - QuantizationDetails_NONE = 0, - QuantizationDetails_CustomQuantization = 1, - QuantizationDetails_MIN = QuantizationDetails_NONE, - QuantizationDetails_MAX = QuantizationDetails_CustomQuantization -}; - -inline const QuantizationDetails (&EnumValuesQuantizationDetails())[2] { - static const QuantizationDetails values[] = { - QuantizationDetails_NONE, - QuantizationDetails_CustomQuantization - }; - return values; -} - -inline const char * const *EnumNamesQuantizationDetails() { - static const char * const names[3] = { - "NONE", - "CustomQuantization", - nullptr - }; - return names; -} - -inline const char *EnumNameQuantizationDetails(QuantizationDetails e) { - if (flatbuffers::IsOutRange(e, QuantizationDetails_NONE, QuantizationDetails_CustomQuantization)) return ""; - const size_t index = static_cast(e); - return EnumNamesQuantizationDetails()[index]; -} - -template struct QuantizationDetailsTraits { - static const QuantizationDetails enum_value = QuantizationDetails_NONE; -}; - -template<> struct QuantizationDetailsTraits { - static const QuantizationDetails enum_value = QuantizationDetails_CustomQuantization; -}; - -struct QuantizationDetailsUnion { - QuantizationDetails type; - void *value; - - QuantizationDetailsUnion() : type(QuantizationDetails_NONE), value(nullptr) {} - QuantizationDetailsUnion(QuantizationDetailsUnion&& u) FLATBUFFERS_NOEXCEPT : - type(QuantizationDetails_NONE), value(nullptr) - { std::swap(type, u.type); std::swap(value, u.value); } - QuantizationDetailsUnion(const QuantizationDetailsUnion &) FLATBUFFERS_NOEXCEPT; - QuantizationDetailsUnion &operator=(const QuantizationDetailsUnion &u) FLATBUFFERS_NOEXCEPT - { QuantizationDetailsUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } - QuantizationDetailsUnion &operator=(QuantizationDetailsUnion &&u) FLATBUFFERS_NOEXCEPT - { std::swap(type, u.type); std::swap(value, u.value); return *this; } - ~QuantizationDetailsUnion() { Reset(); } - - void Reset(); - -#ifndef FLATBUFFERS_CPP98_STL - template - void Set(T&& val) { - using RT = typename std::remove_reference::type; - Reset(); - type = QuantizationDetailsTraits::enum_value; - if (type != QuantizationDetails_NONE) { - value = new RT(std::forward(val)); - } - } -#endif // FLATBUFFERS_CPP98_STL - - static void *UnPack(const void *obj, QuantizationDetails type, const flatbuffers::resolver_function_t *resolver); - flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const 
flatbuffers::rehasher_function_t *_rehasher = nullptr) const; - - tflite::CustomQuantizationT *AsCustomQuantization() { - return type == QuantizationDetails_CustomQuantization ? - reinterpret_cast(value) : nullptr; - } - const tflite::CustomQuantizationT *AsCustomQuantization() const { - return type == QuantizationDetails_CustomQuantization ? - reinterpret_cast(value) : nullptr; - } -}; - -bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type); -bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); - -enum DimensionType { - DimensionType_DENSE = 0, - DimensionType_SPARSE_CSR = 1, - DimensionType_MIN = DimensionType_DENSE, - DimensionType_MAX = DimensionType_SPARSE_CSR -}; - -inline const DimensionType (&EnumValuesDimensionType())[2] { - static const DimensionType values[] = { - DimensionType_DENSE, - DimensionType_SPARSE_CSR - }; - return values; -} - -inline const char * const *EnumNamesDimensionType() { - static const char * const names[3] = { - "DENSE", - "SPARSE_CSR", - nullptr - }; - return names; -} - -inline const char *EnumNameDimensionType(DimensionType e) { - if (flatbuffers::IsOutRange(e, DimensionType_DENSE, DimensionType_SPARSE_CSR)) return ""; - const size_t index = static_cast(e); - return EnumNamesDimensionType()[index]; -} - -enum SparseIndexVector { - SparseIndexVector_NONE = 0, - SparseIndexVector_Int32Vector = 1, - SparseIndexVector_Uint16Vector = 2, - SparseIndexVector_Uint8Vector = 3, - SparseIndexVector_MIN = SparseIndexVector_NONE, - SparseIndexVector_MAX = SparseIndexVector_Uint8Vector -}; - -inline const SparseIndexVector (&EnumValuesSparseIndexVector())[4] { - static const SparseIndexVector values[] = { - SparseIndexVector_NONE, - SparseIndexVector_Int32Vector, - SparseIndexVector_Uint16Vector, - SparseIndexVector_Uint8Vector - }; - return values; -} - -inline const char * const *EnumNamesSparseIndexVector() { - static const char * const names[5] = { - "NONE", - "Int32Vector", - "Uint16Vector", - "Uint8Vector", - nullptr - }; - return names; -} - -inline const char *EnumNameSparseIndexVector(SparseIndexVector e) { - if (flatbuffers::IsOutRange(e, SparseIndexVector_NONE, SparseIndexVector_Uint8Vector)) return ""; - const size_t index = static_cast(e); - return EnumNamesSparseIndexVector()[index]; -} - -template struct SparseIndexVectorTraits { - static const SparseIndexVector enum_value = SparseIndexVector_NONE; -}; - -template<> struct SparseIndexVectorTraits { - static const SparseIndexVector enum_value = SparseIndexVector_Int32Vector; -}; - -template<> struct SparseIndexVectorTraits { - static const SparseIndexVector enum_value = SparseIndexVector_Uint16Vector; -}; - -template<> struct SparseIndexVectorTraits { - static const SparseIndexVector enum_value = SparseIndexVector_Uint8Vector; -}; - -struct SparseIndexVectorUnion { - SparseIndexVector type; - void *value; - - SparseIndexVectorUnion() : type(SparseIndexVector_NONE), value(nullptr) {} - SparseIndexVectorUnion(SparseIndexVectorUnion&& u) FLATBUFFERS_NOEXCEPT : - type(SparseIndexVector_NONE), value(nullptr) - { std::swap(type, u.type); std::swap(value, u.value); } - SparseIndexVectorUnion(const SparseIndexVectorUnion &) FLATBUFFERS_NOEXCEPT; - SparseIndexVectorUnion &operator=(const SparseIndexVectorUnion &u) FLATBUFFERS_NOEXCEPT - { SparseIndexVectorUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } - SparseIndexVectorUnion 
&operator=(SparseIndexVectorUnion &&u) FLATBUFFERS_NOEXCEPT - { std::swap(type, u.type); std::swap(value, u.value); return *this; } - ~SparseIndexVectorUnion() { Reset(); } - - void Reset(); - -#ifndef FLATBUFFERS_CPP98_STL - template - void Set(T&& val) { - using RT = typename std::remove_reference::type; - Reset(); - type = SparseIndexVectorTraits::enum_value; - if (type != SparseIndexVector_NONE) { - value = new RT(std::forward(val)); - } - } -#endif // FLATBUFFERS_CPP98_STL - - static void *UnPack(const void *obj, SparseIndexVector type, const flatbuffers::resolver_function_t *resolver); - flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; - - tflite::Int32VectorT *AsInt32Vector() { - return type == SparseIndexVector_Int32Vector ? - reinterpret_cast(value) : nullptr; - } - const tflite::Int32VectorT *AsInt32Vector() const { - return type == SparseIndexVector_Int32Vector ? - reinterpret_cast(value) : nullptr; - } - tflite::Uint16VectorT *AsUint16Vector() { - return type == SparseIndexVector_Uint16Vector ? - reinterpret_cast(value) : nullptr; - } - const tflite::Uint16VectorT *AsUint16Vector() const { - return type == SparseIndexVector_Uint16Vector ? - reinterpret_cast(value) : nullptr; - } - tflite::Uint8VectorT *AsUint8Vector() { - return type == SparseIndexVector_Uint8Vector ? - reinterpret_cast(value) : nullptr; - } - const tflite::Uint8VectorT *AsUint8Vector() const { - return type == SparseIndexVector_Uint8Vector ? - reinterpret_cast(value) : nullptr; - } -}; - -bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type); -bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); - -enum BuiltinOperator { - BuiltinOperator_ADD = 0, - BuiltinOperator_AVERAGE_POOL_2D = 1, - BuiltinOperator_CONCATENATION = 2, - BuiltinOperator_CONV_2D = 3, - BuiltinOperator_DEPTHWISE_CONV_2D = 4, - BuiltinOperator_DEPTH_TO_SPACE = 5, - BuiltinOperator_DEQUANTIZE = 6, - BuiltinOperator_EMBEDDING_LOOKUP = 7, - BuiltinOperator_FLOOR = 8, - BuiltinOperator_FULLY_CONNECTED = 9, - BuiltinOperator_HASHTABLE_LOOKUP = 10, - BuiltinOperator_L2_NORMALIZATION = 11, - BuiltinOperator_L2_POOL_2D = 12, - BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION = 13, - BuiltinOperator_LOGISTIC = 14, - BuiltinOperator_LSH_PROJECTION = 15, - BuiltinOperator_LSTM = 16, - BuiltinOperator_MAX_POOL_2D = 17, - BuiltinOperator_MUL = 18, - BuiltinOperator_RELU = 19, - BuiltinOperator_RELU_N1_TO_1 = 20, - BuiltinOperator_RELU6 = 21, - BuiltinOperator_RESHAPE = 22, - BuiltinOperator_RESIZE_BILINEAR = 23, - BuiltinOperator_RNN = 24, - BuiltinOperator_SOFTMAX = 25, - BuiltinOperator_SPACE_TO_DEPTH = 26, - BuiltinOperator_SVDF = 27, - BuiltinOperator_TANH = 28, - BuiltinOperator_CONCAT_EMBEDDINGS = 29, - BuiltinOperator_SKIP_GRAM = 30, - BuiltinOperator_CALL = 31, - BuiltinOperator_CUSTOM = 32, - BuiltinOperator_EMBEDDING_LOOKUP_SPARSE = 33, - BuiltinOperator_PAD = 34, - BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN = 35, - BuiltinOperator_GATHER = 36, - BuiltinOperator_BATCH_TO_SPACE_ND = 37, - BuiltinOperator_SPACE_TO_BATCH_ND = 38, - BuiltinOperator_TRANSPOSE = 39, - BuiltinOperator_MEAN = 40, - BuiltinOperator_SUB = 41, - BuiltinOperator_DIV = 42, - BuiltinOperator_SQUEEZE = 43, - BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM = 44, - BuiltinOperator_STRIDED_SLICE = 45, - BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN = 46, - BuiltinOperator_EXP 
= 47, - BuiltinOperator_TOPK_V2 = 48, - BuiltinOperator_SPLIT = 49, - BuiltinOperator_LOG_SOFTMAX = 50, - BuiltinOperator_DELEGATE = 51, - BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM = 52, - BuiltinOperator_CAST = 53, - BuiltinOperator_PRELU = 54, - BuiltinOperator_MAXIMUM = 55, - BuiltinOperator_ARG_MAX = 56, - BuiltinOperator_MINIMUM = 57, - BuiltinOperator_LESS = 58, - BuiltinOperator_NEG = 59, - BuiltinOperator_PADV2 = 60, - BuiltinOperator_GREATER = 61, - BuiltinOperator_GREATER_EQUAL = 62, - BuiltinOperator_LESS_EQUAL = 63, - BuiltinOperator_SELECT = 64, - BuiltinOperator_SLICE = 65, - BuiltinOperator_SIN = 66, - BuiltinOperator_TRANSPOSE_CONV = 67, - BuiltinOperator_SPARSE_TO_DENSE = 68, - BuiltinOperator_TILE = 69, - BuiltinOperator_EXPAND_DIMS = 70, - BuiltinOperator_EQUAL = 71, - BuiltinOperator_NOT_EQUAL = 72, - BuiltinOperator_LOG = 73, - BuiltinOperator_SUM = 74, - BuiltinOperator_SQRT = 75, - BuiltinOperator_RSQRT = 76, - BuiltinOperator_SHAPE = 77, - BuiltinOperator_POW = 78, - BuiltinOperator_ARG_MIN = 79, - BuiltinOperator_FAKE_QUANT = 80, - BuiltinOperator_REDUCE_PROD = 81, - BuiltinOperator_REDUCE_MAX = 82, - BuiltinOperator_PACK = 83, - BuiltinOperator_LOGICAL_OR = 84, - BuiltinOperator_ONE_HOT = 85, - BuiltinOperator_LOGICAL_AND = 86, - BuiltinOperator_LOGICAL_NOT = 87, - BuiltinOperator_UNPACK = 88, - BuiltinOperator_REDUCE_MIN = 89, - BuiltinOperator_FLOOR_DIV = 90, - BuiltinOperator_REDUCE_ANY = 91, - BuiltinOperator_SQUARE = 92, - BuiltinOperator_ZEROS_LIKE = 93, - BuiltinOperator_FILL = 94, - BuiltinOperator_FLOOR_MOD = 95, - BuiltinOperator_RANGE = 96, - BuiltinOperator_RESIZE_NEAREST_NEIGHBOR = 97, - BuiltinOperator_LEAKY_RELU = 98, - BuiltinOperator_SQUARED_DIFFERENCE = 99, - BuiltinOperator_MIRROR_PAD = 100, - BuiltinOperator_ABS = 101, - BuiltinOperator_SPLIT_V = 102, - BuiltinOperator_UNIQUE = 103, - BuiltinOperator_CEIL = 104, - BuiltinOperator_REVERSE_V2 = 105, - BuiltinOperator_ADD_N = 106, - BuiltinOperator_GATHER_ND = 107, - BuiltinOperator_COS = 108, - BuiltinOperator_WHERE = 109, - BuiltinOperator_RANK = 110, - BuiltinOperator_ELU = 111, - BuiltinOperator_REVERSE_SEQUENCE = 112, - BuiltinOperator_MATRIX_DIAG = 113, - BuiltinOperator_QUANTIZE = 114, - BuiltinOperator_MATRIX_SET_DIAG = 115, - BuiltinOperator_ROUND = 116, - BuiltinOperator_HARD_SWISH = 117, - BuiltinOperator_IF = 118, - BuiltinOperator_WHILE = 119, - BuiltinOperator_NON_MAX_SUPPRESSION_V4 = 120, - BuiltinOperator_NON_MAX_SUPPRESSION_V5 = 121, - BuiltinOperator_SCATTER_ND = 122, - BuiltinOperator_SELECT_V2 = 123, - BuiltinOperator_DENSIFY = 124, - BuiltinOperator_SEGMENT_SUM = 125, - BuiltinOperator_BATCH_MATMUL = 126, - BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES = 127, - BuiltinOperator_CUMSUM = 128, - BuiltinOperator_CALL_ONCE = 129, - BuiltinOperator_BROADCAST_TO = 130, - BuiltinOperator_RFFT2D = 131, - BuiltinOperator_MIN = BuiltinOperator_ADD, - BuiltinOperator_MAX = BuiltinOperator_RFFT2D -}; - -inline const BuiltinOperator (&EnumValuesBuiltinOperator())[132] { - static const BuiltinOperator values[] = { - BuiltinOperator_ADD, - BuiltinOperator_AVERAGE_POOL_2D, - BuiltinOperator_CONCATENATION, - BuiltinOperator_CONV_2D, - BuiltinOperator_DEPTHWISE_CONV_2D, - BuiltinOperator_DEPTH_TO_SPACE, - BuiltinOperator_DEQUANTIZE, - BuiltinOperator_EMBEDDING_LOOKUP, - BuiltinOperator_FLOOR, - BuiltinOperator_FULLY_CONNECTED, - BuiltinOperator_HASHTABLE_LOOKUP, - BuiltinOperator_L2_NORMALIZATION, - BuiltinOperator_L2_POOL_2D, - BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION, - 
BuiltinOperator_LOGISTIC, - BuiltinOperator_LSH_PROJECTION, - BuiltinOperator_LSTM, - BuiltinOperator_MAX_POOL_2D, - BuiltinOperator_MUL, - BuiltinOperator_RELU, - BuiltinOperator_RELU_N1_TO_1, - BuiltinOperator_RELU6, - BuiltinOperator_RESHAPE, - BuiltinOperator_RESIZE_BILINEAR, - BuiltinOperator_RNN, - BuiltinOperator_SOFTMAX, - BuiltinOperator_SPACE_TO_DEPTH, - BuiltinOperator_SVDF, - BuiltinOperator_TANH, - BuiltinOperator_CONCAT_EMBEDDINGS, - BuiltinOperator_SKIP_GRAM, - BuiltinOperator_CALL, - BuiltinOperator_CUSTOM, - BuiltinOperator_EMBEDDING_LOOKUP_SPARSE, - BuiltinOperator_PAD, - BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN, - BuiltinOperator_GATHER, - BuiltinOperator_BATCH_TO_SPACE_ND, - BuiltinOperator_SPACE_TO_BATCH_ND, - BuiltinOperator_TRANSPOSE, - BuiltinOperator_MEAN, - BuiltinOperator_SUB, - BuiltinOperator_DIV, - BuiltinOperator_SQUEEZE, - BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, - BuiltinOperator_STRIDED_SLICE, - BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN, - BuiltinOperator_EXP, - BuiltinOperator_TOPK_V2, - BuiltinOperator_SPLIT, - BuiltinOperator_LOG_SOFTMAX, - BuiltinOperator_DELEGATE, - BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM, - BuiltinOperator_CAST, - BuiltinOperator_PRELU, - BuiltinOperator_MAXIMUM, - BuiltinOperator_ARG_MAX, - BuiltinOperator_MINIMUM, - BuiltinOperator_LESS, - BuiltinOperator_NEG, - BuiltinOperator_PADV2, - BuiltinOperator_GREATER, - BuiltinOperator_GREATER_EQUAL, - BuiltinOperator_LESS_EQUAL, - BuiltinOperator_SELECT, - BuiltinOperator_SLICE, - BuiltinOperator_SIN, - BuiltinOperator_TRANSPOSE_CONV, - BuiltinOperator_SPARSE_TO_DENSE, - BuiltinOperator_TILE, - BuiltinOperator_EXPAND_DIMS, - BuiltinOperator_EQUAL, - BuiltinOperator_NOT_EQUAL, - BuiltinOperator_LOG, - BuiltinOperator_SUM, - BuiltinOperator_SQRT, - BuiltinOperator_RSQRT, - BuiltinOperator_SHAPE, - BuiltinOperator_POW, - BuiltinOperator_ARG_MIN, - BuiltinOperator_FAKE_QUANT, - BuiltinOperator_REDUCE_PROD, - BuiltinOperator_REDUCE_MAX, - BuiltinOperator_PACK, - BuiltinOperator_LOGICAL_OR, - BuiltinOperator_ONE_HOT, - BuiltinOperator_LOGICAL_AND, - BuiltinOperator_LOGICAL_NOT, - BuiltinOperator_UNPACK, - BuiltinOperator_REDUCE_MIN, - BuiltinOperator_FLOOR_DIV, - BuiltinOperator_REDUCE_ANY, - BuiltinOperator_SQUARE, - BuiltinOperator_ZEROS_LIKE, - BuiltinOperator_FILL, - BuiltinOperator_FLOOR_MOD, - BuiltinOperator_RANGE, - BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, - BuiltinOperator_LEAKY_RELU, - BuiltinOperator_SQUARED_DIFFERENCE, - BuiltinOperator_MIRROR_PAD, - BuiltinOperator_ABS, - BuiltinOperator_SPLIT_V, - BuiltinOperator_UNIQUE, - BuiltinOperator_CEIL, - BuiltinOperator_REVERSE_V2, - BuiltinOperator_ADD_N, - BuiltinOperator_GATHER_ND, - BuiltinOperator_COS, - BuiltinOperator_WHERE, - BuiltinOperator_RANK, - BuiltinOperator_ELU, - BuiltinOperator_REVERSE_SEQUENCE, - BuiltinOperator_MATRIX_DIAG, - BuiltinOperator_QUANTIZE, - BuiltinOperator_MATRIX_SET_DIAG, - BuiltinOperator_ROUND, - BuiltinOperator_HARD_SWISH, - BuiltinOperator_IF, - BuiltinOperator_WHILE, - BuiltinOperator_NON_MAX_SUPPRESSION_V4, - BuiltinOperator_NON_MAX_SUPPRESSION_V5, - BuiltinOperator_SCATTER_ND, - BuiltinOperator_SELECT_V2, - BuiltinOperator_DENSIFY, - BuiltinOperator_SEGMENT_SUM, - BuiltinOperator_BATCH_MATMUL, - BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES, - BuiltinOperator_CUMSUM, - BuiltinOperator_CALL_ONCE, - BuiltinOperator_BROADCAST_TO, - BuiltinOperator_RFFT2D}; - return values; -} - -inline const char * const *EnumNamesBuiltinOperator() { - static const char *const names[133] = {"ADD", - 
"AVERAGE_POOL_2D", - "CONCATENATION", - "CONV_2D", - "DEPTHWISE_CONV_2D", - "DEPTH_TO_SPACE", - "DEQUANTIZE", - "EMBEDDING_LOOKUP", - "FLOOR", - "FULLY_CONNECTED", - "HASHTABLE_LOOKUP", - "L2_NORMALIZATION", - "L2_POOL_2D", - "LOCAL_RESPONSE_NORMALIZATION", - "LOGISTIC", - "LSH_PROJECTION", - "LSTM", - "MAX_POOL_2D", - "MUL", - "RELU", - "RELU_N1_TO_1", - "RELU6", - "RESHAPE", - "RESIZE_BILINEAR", - "RNN", - "SOFTMAX", - "SPACE_TO_DEPTH", - "SVDF", - "TANH", - "CONCAT_EMBEDDINGS", - "SKIP_GRAM", - "CALL", - "CUSTOM", - "EMBEDDING_LOOKUP_SPARSE", - "PAD", - "UNIDIRECTIONAL_SEQUENCE_RNN", - "GATHER", - "BATCH_TO_SPACE_ND", - "SPACE_TO_BATCH_ND", - "TRANSPOSE", - "MEAN", - "SUB", - "DIV", - "SQUEEZE", - "UNIDIRECTIONAL_SEQUENCE_LSTM", - "STRIDED_SLICE", - "BIDIRECTIONAL_SEQUENCE_RNN", - "EXP", - "TOPK_V2", - "SPLIT", - "LOG_SOFTMAX", - "DELEGATE", - "BIDIRECTIONAL_SEQUENCE_LSTM", - "CAST", - "PRELU", - "MAXIMUM", - "ARG_MAX", - "MINIMUM", - "LESS", - "NEG", - "PADV2", - "GREATER", - "GREATER_EQUAL", - "LESS_EQUAL", - "SELECT", - "SLICE", - "SIN", - "TRANSPOSE_CONV", - "SPARSE_TO_DENSE", - "TILE", - "EXPAND_DIMS", - "EQUAL", - "NOT_EQUAL", - "LOG", - "SUM", - "SQRT", - "RSQRT", - "SHAPE", - "POW", - "ARG_MIN", - "FAKE_QUANT", - "REDUCE_PROD", - "REDUCE_MAX", - "PACK", - "LOGICAL_OR", - "ONE_HOT", - "LOGICAL_AND", - "LOGICAL_NOT", - "UNPACK", - "REDUCE_MIN", - "FLOOR_DIV", - "REDUCE_ANY", - "SQUARE", - "ZEROS_LIKE", - "FILL", - "FLOOR_MOD", - "RANGE", - "RESIZE_NEAREST_NEIGHBOR", - "LEAKY_RELU", - "SQUARED_DIFFERENCE", - "MIRROR_PAD", - "ABS", - "SPLIT_V", - "UNIQUE", - "CEIL", - "REVERSE_V2", - "ADD_N", - "GATHER_ND", - "COS", - "WHERE", - "RANK", - "ELU", - "REVERSE_SEQUENCE", - "MATRIX_DIAG", - "QUANTIZE", - "MATRIX_SET_DIAG", - "ROUND", - "HARD_SWISH", - "IF", - "WHILE", - "NON_MAX_SUPPRESSION_V4", - "NON_MAX_SUPPRESSION_V5", - "SCATTER_ND", - "SELECT_V2", - "DENSIFY", - "SEGMENT_SUM", - "BATCH_MATMUL", - "PLACEHOLDER_FOR_GREATER_OP_CODES", - "CUMSUM", - "CALL_ONCE", - "BROADCAST_TO", - "RFFT2D", - nullptr}; - return names; -} - -inline const char *EnumNameBuiltinOperator(BuiltinOperator e) { - if (flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_RFFT2D)) - return ""; - const size_t index = static_cast(e); - return EnumNamesBuiltinOperator()[index]; -} - -enum BuiltinOptions { - BuiltinOptions_NONE = 0, - BuiltinOptions_Conv2DOptions = 1, - BuiltinOptions_DepthwiseConv2DOptions = 2, - BuiltinOptions_ConcatEmbeddingsOptions = 3, - BuiltinOptions_LSHProjectionOptions = 4, - BuiltinOptions_Pool2DOptions = 5, - BuiltinOptions_SVDFOptions = 6, - BuiltinOptions_RNNOptions = 7, - BuiltinOptions_FullyConnectedOptions = 8, - BuiltinOptions_SoftmaxOptions = 9, - BuiltinOptions_ConcatenationOptions = 10, - BuiltinOptions_AddOptions = 11, - BuiltinOptions_L2NormOptions = 12, - BuiltinOptions_LocalResponseNormalizationOptions = 13, - BuiltinOptions_LSTMOptions = 14, - BuiltinOptions_ResizeBilinearOptions = 15, - BuiltinOptions_CallOptions = 16, - BuiltinOptions_ReshapeOptions = 17, - BuiltinOptions_SkipGramOptions = 18, - BuiltinOptions_SpaceToDepthOptions = 19, - BuiltinOptions_EmbeddingLookupSparseOptions = 20, - BuiltinOptions_MulOptions = 21, - BuiltinOptions_PadOptions = 22, - BuiltinOptions_GatherOptions = 23, - BuiltinOptions_BatchToSpaceNDOptions = 24, - BuiltinOptions_SpaceToBatchNDOptions = 25, - BuiltinOptions_TransposeOptions = 26, - BuiltinOptions_ReducerOptions = 27, - BuiltinOptions_SubOptions = 28, - BuiltinOptions_DivOptions = 29, - BuiltinOptions_SqueezeOptions = 30, - 
BuiltinOptions_SequenceRNNOptions = 31, - BuiltinOptions_StridedSliceOptions = 32, - BuiltinOptions_ExpOptions = 33, - BuiltinOptions_TopKV2Options = 34, - BuiltinOptions_SplitOptions = 35, - BuiltinOptions_LogSoftmaxOptions = 36, - BuiltinOptions_CastOptions = 37, - BuiltinOptions_DequantizeOptions = 38, - BuiltinOptions_MaximumMinimumOptions = 39, - BuiltinOptions_ArgMaxOptions = 40, - BuiltinOptions_LessOptions = 41, - BuiltinOptions_NegOptions = 42, - BuiltinOptions_PadV2Options = 43, - BuiltinOptions_GreaterOptions = 44, - BuiltinOptions_GreaterEqualOptions = 45, - BuiltinOptions_LessEqualOptions = 46, - BuiltinOptions_SelectOptions = 47, - BuiltinOptions_SliceOptions = 48, - BuiltinOptions_TransposeConvOptions = 49, - BuiltinOptions_SparseToDenseOptions = 50, - BuiltinOptions_TileOptions = 51, - BuiltinOptions_ExpandDimsOptions = 52, - BuiltinOptions_EqualOptions = 53, - BuiltinOptions_NotEqualOptions = 54, - BuiltinOptions_ShapeOptions = 55, - BuiltinOptions_PowOptions = 56, - BuiltinOptions_ArgMinOptions = 57, - BuiltinOptions_FakeQuantOptions = 58, - BuiltinOptions_PackOptions = 59, - BuiltinOptions_LogicalOrOptions = 60, - BuiltinOptions_OneHotOptions = 61, - BuiltinOptions_LogicalAndOptions = 62, - BuiltinOptions_LogicalNotOptions = 63, - BuiltinOptions_UnpackOptions = 64, - BuiltinOptions_FloorDivOptions = 65, - BuiltinOptions_SquareOptions = 66, - BuiltinOptions_ZerosLikeOptions = 67, - BuiltinOptions_FillOptions = 68, - BuiltinOptions_BidirectionalSequenceLSTMOptions = 69, - BuiltinOptions_BidirectionalSequenceRNNOptions = 70, - BuiltinOptions_UnidirectionalSequenceLSTMOptions = 71, - BuiltinOptions_FloorModOptions = 72, - BuiltinOptions_RangeOptions = 73, - BuiltinOptions_ResizeNearestNeighborOptions = 74, - BuiltinOptions_LeakyReluOptions = 75, - BuiltinOptions_SquaredDifferenceOptions = 76, - BuiltinOptions_MirrorPadOptions = 77, - BuiltinOptions_AbsOptions = 78, - BuiltinOptions_SplitVOptions = 79, - BuiltinOptions_UniqueOptions = 80, - BuiltinOptions_ReverseV2Options = 81, - BuiltinOptions_AddNOptions = 82, - BuiltinOptions_GatherNdOptions = 83, - BuiltinOptions_CosOptions = 84, - BuiltinOptions_WhereOptions = 85, - BuiltinOptions_RankOptions = 86, - BuiltinOptions_ReverseSequenceOptions = 87, - BuiltinOptions_MatrixDiagOptions = 88, - BuiltinOptions_QuantizeOptions = 89, - BuiltinOptions_MatrixSetDiagOptions = 90, - BuiltinOptions_HardSwishOptions = 91, - BuiltinOptions_IfOptions = 92, - BuiltinOptions_WhileOptions = 93, - BuiltinOptions_DepthToSpaceOptions = 94, - BuiltinOptions_NonMaxSuppressionV4Options = 95, - BuiltinOptions_NonMaxSuppressionV5Options = 96, - BuiltinOptions_ScatterNdOptions = 97, - BuiltinOptions_SelectV2Options = 98, - BuiltinOptions_DensifyOptions = 99, - BuiltinOptions_SegmentSumOptions = 100, - BuiltinOptions_BatchMatMulOptions = 101, - BuiltinOptions_CumsumOptions = 102, - BuiltinOptions_CallOnceOptions = 103, - BuiltinOptions_BroadcastToOptions = 104, - BuiltinOptions_Rfft2dOptions = 105, - BuiltinOptions_MIN = BuiltinOptions_NONE, - BuiltinOptions_MAX = BuiltinOptions_Rfft2dOptions -}; - -inline const BuiltinOptions (&EnumValuesBuiltinOptions())[106] { - static const BuiltinOptions values[] = { - BuiltinOptions_NONE, - BuiltinOptions_Conv2DOptions, - BuiltinOptions_DepthwiseConv2DOptions, - BuiltinOptions_ConcatEmbeddingsOptions, - BuiltinOptions_LSHProjectionOptions, - BuiltinOptions_Pool2DOptions, - BuiltinOptions_SVDFOptions, - BuiltinOptions_RNNOptions, - BuiltinOptions_FullyConnectedOptions, - BuiltinOptions_SoftmaxOptions, - 
BuiltinOptions_ConcatenationOptions, - BuiltinOptions_AddOptions, - BuiltinOptions_L2NormOptions, - BuiltinOptions_LocalResponseNormalizationOptions, - BuiltinOptions_LSTMOptions, - BuiltinOptions_ResizeBilinearOptions, - BuiltinOptions_CallOptions, - BuiltinOptions_ReshapeOptions, - BuiltinOptions_SkipGramOptions, - BuiltinOptions_SpaceToDepthOptions, - BuiltinOptions_EmbeddingLookupSparseOptions, - BuiltinOptions_MulOptions, - BuiltinOptions_PadOptions, - BuiltinOptions_GatherOptions, - BuiltinOptions_BatchToSpaceNDOptions, - BuiltinOptions_SpaceToBatchNDOptions, - BuiltinOptions_TransposeOptions, - BuiltinOptions_ReducerOptions, - BuiltinOptions_SubOptions, - BuiltinOptions_DivOptions, - BuiltinOptions_SqueezeOptions, - BuiltinOptions_SequenceRNNOptions, - BuiltinOptions_StridedSliceOptions, - BuiltinOptions_ExpOptions, - BuiltinOptions_TopKV2Options, - BuiltinOptions_SplitOptions, - BuiltinOptions_LogSoftmaxOptions, - BuiltinOptions_CastOptions, - BuiltinOptions_DequantizeOptions, - BuiltinOptions_MaximumMinimumOptions, - BuiltinOptions_ArgMaxOptions, - BuiltinOptions_LessOptions, - BuiltinOptions_NegOptions, - BuiltinOptions_PadV2Options, - BuiltinOptions_GreaterOptions, - BuiltinOptions_GreaterEqualOptions, - BuiltinOptions_LessEqualOptions, - BuiltinOptions_SelectOptions, - BuiltinOptions_SliceOptions, - BuiltinOptions_TransposeConvOptions, - BuiltinOptions_SparseToDenseOptions, - BuiltinOptions_TileOptions, - BuiltinOptions_ExpandDimsOptions, - BuiltinOptions_EqualOptions, - BuiltinOptions_NotEqualOptions, - BuiltinOptions_ShapeOptions, - BuiltinOptions_PowOptions, - BuiltinOptions_ArgMinOptions, - BuiltinOptions_FakeQuantOptions, - BuiltinOptions_PackOptions, - BuiltinOptions_LogicalOrOptions, - BuiltinOptions_OneHotOptions, - BuiltinOptions_LogicalAndOptions, - BuiltinOptions_LogicalNotOptions, - BuiltinOptions_UnpackOptions, - BuiltinOptions_FloorDivOptions, - BuiltinOptions_SquareOptions, - BuiltinOptions_ZerosLikeOptions, - BuiltinOptions_FillOptions, - BuiltinOptions_BidirectionalSequenceLSTMOptions, - BuiltinOptions_BidirectionalSequenceRNNOptions, - BuiltinOptions_UnidirectionalSequenceLSTMOptions, - BuiltinOptions_FloorModOptions, - BuiltinOptions_RangeOptions, - BuiltinOptions_ResizeNearestNeighborOptions, - BuiltinOptions_LeakyReluOptions, - BuiltinOptions_SquaredDifferenceOptions, - BuiltinOptions_MirrorPadOptions, - BuiltinOptions_AbsOptions, - BuiltinOptions_SplitVOptions, - BuiltinOptions_UniqueOptions, - BuiltinOptions_ReverseV2Options, - BuiltinOptions_AddNOptions, - BuiltinOptions_GatherNdOptions, - BuiltinOptions_CosOptions, - BuiltinOptions_WhereOptions, - BuiltinOptions_RankOptions, - BuiltinOptions_ReverseSequenceOptions, - BuiltinOptions_MatrixDiagOptions, - BuiltinOptions_QuantizeOptions, - BuiltinOptions_MatrixSetDiagOptions, - BuiltinOptions_HardSwishOptions, - BuiltinOptions_IfOptions, - BuiltinOptions_WhileOptions, - BuiltinOptions_DepthToSpaceOptions, - BuiltinOptions_NonMaxSuppressionV4Options, - BuiltinOptions_NonMaxSuppressionV5Options, - BuiltinOptions_ScatterNdOptions, - BuiltinOptions_SelectV2Options, - BuiltinOptions_DensifyOptions, - BuiltinOptions_SegmentSumOptions, - BuiltinOptions_BatchMatMulOptions, - BuiltinOptions_CumsumOptions, - BuiltinOptions_CallOnceOptions, - BuiltinOptions_BroadcastToOptions, - BuiltinOptions_Rfft2dOptions}; - return values; -} - -inline const char * const *EnumNamesBuiltinOptions() { - static const char *const names[107] = {"NONE", - "Conv2DOptions", - "DepthwiseConv2DOptions", - "ConcatEmbeddingsOptions", - 
"LSHProjectionOptions", - "Pool2DOptions", - "SVDFOptions", - "RNNOptions", - "FullyConnectedOptions", - "SoftmaxOptions", - "ConcatenationOptions", - "AddOptions", - "L2NormOptions", - "LocalResponseNormalizationOptions", - "LSTMOptions", - "ResizeBilinearOptions", - "CallOptions", - "ReshapeOptions", - "SkipGramOptions", - "SpaceToDepthOptions", - "EmbeddingLookupSparseOptions", - "MulOptions", - "PadOptions", - "GatherOptions", - "BatchToSpaceNDOptions", - "SpaceToBatchNDOptions", - "TransposeOptions", - "ReducerOptions", - "SubOptions", - "DivOptions", - "SqueezeOptions", - "SequenceRNNOptions", - "StridedSliceOptions", - "ExpOptions", - "TopKV2Options", - "SplitOptions", - "LogSoftmaxOptions", - "CastOptions", - "DequantizeOptions", - "MaximumMinimumOptions", - "ArgMaxOptions", - "LessOptions", - "NegOptions", - "PadV2Options", - "GreaterOptions", - "GreaterEqualOptions", - "LessEqualOptions", - "SelectOptions", - "SliceOptions", - "TransposeConvOptions", - "SparseToDenseOptions", - "TileOptions", - "ExpandDimsOptions", - "EqualOptions", - "NotEqualOptions", - "ShapeOptions", - "PowOptions", - "ArgMinOptions", - "FakeQuantOptions", - "PackOptions", - "LogicalOrOptions", - "OneHotOptions", - "LogicalAndOptions", - "LogicalNotOptions", - "UnpackOptions", - "FloorDivOptions", - "SquareOptions", - "ZerosLikeOptions", - "FillOptions", - "BidirectionalSequenceLSTMOptions", - "BidirectionalSequenceRNNOptions", - "UnidirectionalSequenceLSTMOptions", - "FloorModOptions", - "RangeOptions", - "ResizeNearestNeighborOptions", - "LeakyReluOptions", - "SquaredDifferenceOptions", - "MirrorPadOptions", - "AbsOptions", - "SplitVOptions", - "UniqueOptions", - "ReverseV2Options", - "AddNOptions", - "GatherNdOptions", - "CosOptions", - "WhereOptions", - "RankOptions", - "ReverseSequenceOptions", - "MatrixDiagOptions", - "QuantizeOptions", - "MatrixSetDiagOptions", - "HardSwishOptions", - "IfOptions", - "WhileOptions", - "DepthToSpaceOptions", - "NonMaxSuppressionV4Options", - "NonMaxSuppressionV5Options", - "ScatterNdOptions", - "SelectV2Options", - "DensifyOptions", - "SegmentSumOptions", - "BatchMatMulOptions", - "CumsumOptions", - "CallOnceOptions", - "BroadcastToOptions", - "Rfft2dOptions", - nullptr}; - return names; -} - -inline const char *EnumNameBuiltinOptions(BuiltinOptions e) { - if (flatbuffers::IsOutRange(e, BuiltinOptions_NONE, - BuiltinOptions_Rfft2dOptions)) - return ""; - const size_t index = static_cast(e); - return EnumNamesBuiltinOptions()[index]; -} - -template struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_NONE; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_Conv2DOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_DepthwiseConv2DOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ConcatEmbeddingsOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_LSHProjectionOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_Pool2DOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SVDFOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_RNNOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions 
enum_value = BuiltinOptions_FullyConnectedOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SoftmaxOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ConcatenationOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_AddOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_L2NormOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_LocalResponseNormalizationOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_LSTMOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ResizeBilinearOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_CallOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ReshapeOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SkipGramOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SpaceToDepthOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_EmbeddingLookupSparseOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_MulOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_PadOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_GatherOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_BatchToSpaceNDOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SpaceToBatchNDOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_TransposeOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ReducerOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SubOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_DivOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SqueezeOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SequenceRNNOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_StridedSliceOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ExpOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_TopKV2Options; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SplitOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_LogSoftmaxOptions; -}; - -template<> struct 
BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_CastOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_DequantizeOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_MaximumMinimumOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ArgMaxOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_LessOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_NegOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_PadV2Options; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_GreaterOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_GreaterEqualOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_LessEqualOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SelectOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SliceOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_TransposeConvOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SparseToDenseOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_TileOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ExpandDimsOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_EqualOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_NotEqualOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ShapeOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_PowOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ArgMinOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_FakeQuantOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_PackOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_LogicalOrOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_OneHotOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_LogicalAndOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_LogicalNotOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_UnpackOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_FloorDivOptions; -}; - -template<> struct 
BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SquareOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ZerosLikeOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_FillOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceLSTMOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceRNNOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_UnidirectionalSequenceLSTMOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_FloorModOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_RangeOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ResizeNearestNeighborOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_LeakyReluOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SquaredDifferenceOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_MirrorPadOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_AbsOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SplitVOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_UniqueOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ReverseV2Options; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_AddNOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_GatherNdOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_CosOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_WhereOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_RankOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ReverseSequenceOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_MatrixDiagOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_QuantizeOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_MatrixSetDiagOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_HardSwishOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_IfOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_WhileOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions 
enum_value = BuiltinOptions_DepthToSpaceOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV4Options; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV5Options; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_ScatterNdOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SelectV2Options; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_DensifyOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_SegmentSumOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_BatchMatMulOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_CumsumOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_CallOnceOptions; -}; - -template<> struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_BroadcastToOptions; -}; - -template <> -struct BuiltinOptionsTraits { - static const BuiltinOptions enum_value = BuiltinOptions_Rfft2dOptions; -}; - -struct BuiltinOptionsUnion { - BuiltinOptions type; - void *value; - - BuiltinOptionsUnion() : type(BuiltinOptions_NONE), value(nullptr) {} - BuiltinOptionsUnion(BuiltinOptionsUnion&& u) FLATBUFFERS_NOEXCEPT : - type(BuiltinOptions_NONE), value(nullptr) - { std::swap(type, u.type); std::swap(value, u.value); } - BuiltinOptionsUnion(const BuiltinOptionsUnion &) FLATBUFFERS_NOEXCEPT; - BuiltinOptionsUnion &operator=(const BuiltinOptionsUnion &u) FLATBUFFERS_NOEXCEPT - { BuiltinOptionsUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } - BuiltinOptionsUnion &operator=(BuiltinOptionsUnion &&u) FLATBUFFERS_NOEXCEPT - { std::swap(type, u.type); std::swap(value, u.value); return *this; } - ~BuiltinOptionsUnion() { Reset(); } - - void Reset(); - -#ifndef FLATBUFFERS_CPP98_STL - template - void Set(T&& val) { - using RT = typename std::remove_reference::type; - Reset(); - type = BuiltinOptionsTraits::enum_value; - if (type != BuiltinOptions_NONE) { - value = new RT(std::forward(val)); - } - } -#endif // FLATBUFFERS_CPP98_STL - - static void *UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver); - flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; - - tflite::Conv2DOptionsT *AsConv2DOptions() { - return type == BuiltinOptions_Conv2DOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::Conv2DOptionsT *AsConv2DOptions() const { - return type == BuiltinOptions_Conv2DOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() { - return type == BuiltinOptions_DepthwiseConv2DOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() const { - return type == BuiltinOptions_DepthwiseConv2DOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() { - return type == BuiltinOptions_ConcatEmbeddingsOptions ? 
- reinterpret_cast(value) : nullptr; - } - const tflite::ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() const { - return type == BuiltinOptions_ConcatEmbeddingsOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LSHProjectionOptionsT *AsLSHProjectionOptions() { - return type == BuiltinOptions_LSHProjectionOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LSHProjectionOptionsT *AsLSHProjectionOptions() const { - return type == BuiltinOptions_LSHProjectionOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::Pool2DOptionsT *AsPool2DOptions() { - return type == BuiltinOptions_Pool2DOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::Pool2DOptionsT *AsPool2DOptions() const { - return type == BuiltinOptions_Pool2DOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SVDFOptionsT *AsSVDFOptions() { - return type == BuiltinOptions_SVDFOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SVDFOptionsT *AsSVDFOptions() const { - return type == BuiltinOptions_SVDFOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::RNNOptionsT *AsRNNOptions() { - return type == BuiltinOptions_RNNOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::RNNOptionsT *AsRNNOptions() const { - return type == BuiltinOptions_RNNOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::FullyConnectedOptionsT *AsFullyConnectedOptions() { - return type == BuiltinOptions_FullyConnectedOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::FullyConnectedOptionsT *AsFullyConnectedOptions() const { - return type == BuiltinOptions_FullyConnectedOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SoftmaxOptionsT *AsSoftmaxOptions() { - return type == BuiltinOptions_SoftmaxOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SoftmaxOptionsT *AsSoftmaxOptions() const { - return type == BuiltinOptions_SoftmaxOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ConcatenationOptionsT *AsConcatenationOptions() { - return type == BuiltinOptions_ConcatenationOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ConcatenationOptionsT *AsConcatenationOptions() const { - return type == BuiltinOptions_ConcatenationOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::AddOptionsT *AsAddOptions() { - return type == BuiltinOptions_AddOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::AddOptionsT *AsAddOptions() const { - return type == BuiltinOptions_AddOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::L2NormOptionsT *AsL2NormOptions() { - return type == BuiltinOptions_L2NormOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::L2NormOptionsT *AsL2NormOptions() const { - return type == BuiltinOptions_L2NormOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() { - return type == BuiltinOptions_LocalResponseNormalizationOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() const { - return type == BuiltinOptions_LocalResponseNormalizationOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LSTMOptionsT *AsLSTMOptions() { - return type == BuiltinOptions_LSTMOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LSTMOptionsT *AsLSTMOptions() const { - return type == BuiltinOptions_LSTMOptions ? 
- reinterpret_cast(value) : nullptr; - } - tflite::ResizeBilinearOptionsT *AsResizeBilinearOptions() { - return type == BuiltinOptions_ResizeBilinearOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ResizeBilinearOptionsT *AsResizeBilinearOptions() const { - return type == BuiltinOptions_ResizeBilinearOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::CallOptionsT *AsCallOptions() { - return type == BuiltinOptions_CallOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::CallOptionsT *AsCallOptions() const { - return type == BuiltinOptions_CallOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ReshapeOptionsT *AsReshapeOptions() { - return type == BuiltinOptions_ReshapeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ReshapeOptionsT *AsReshapeOptions() const { - return type == BuiltinOptions_ReshapeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SkipGramOptionsT *AsSkipGramOptions() { - return type == BuiltinOptions_SkipGramOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SkipGramOptionsT *AsSkipGramOptions() const { - return type == BuiltinOptions_SkipGramOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SpaceToDepthOptionsT *AsSpaceToDepthOptions() { - return type == BuiltinOptions_SpaceToDepthOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SpaceToDepthOptionsT *AsSpaceToDepthOptions() const { - return type == BuiltinOptions_SpaceToDepthOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() { - return type == BuiltinOptions_EmbeddingLookupSparseOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() const { - return type == BuiltinOptions_EmbeddingLookupSparseOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::MulOptionsT *AsMulOptions() { - return type == BuiltinOptions_MulOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::MulOptionsT *AsMulOptions() const { - return type == BuiltinOptions_MulOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::PadOptionsT *AsPadOptions() { - return type == BuiltinOptions_PadOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::PadOptionsT *AsPadOptions() const { - return type == BuiltinOptions_PadOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::GatherOptionsT *AsGatherOptions() { - return type == BuiltinOptions_GatherOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::GatherOptionsT *AsGatherOptions() const { - return type == BuiltinOptions_GatherOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::BatchToSpaceNDOptionsT *AsBatchToSpaceNDOptions() { - return type == BuiltinOptions_BatchToSpaceNDOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::BatchToSpaceNDOptionsT *AsBatchToSpaceNDOptions() const { - return type == BuiltinOptions_BatchToSpaceNDOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SpaceToBatchNDOptionsT *AsSpaceToBatchNDOptions() { - return type == BuiltinOptions_SpaceToBatchNDOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SpaceToBatchNDOptionsT *AsSpaceToBatchNDOptions() const { - return type == BuiltinOptions_SpaceToBatchNDOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::TransposeOptionsT *AsTransposeOptions() { - return type == BuiltinOptions_TransposeOptions ? 
- reinterpret_cast(value) : nullptr; - } - const tflite::TransposeOptionsT *AsTransposeOptions() const { - return type == BuiltinOptions_TransposeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ReducerOptionsT *AsReducerOptions() { - return type == BuiltinOptions_ReducerOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ReducerOptionsT *AsReducerOptions() const { - return type == BuiltinOptions_ReducerOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SubOptionsT *AsSubOptions() { - return type == BuiltinOptions_SubOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SubOptionsT *AsSubOptions() const { - return type == BuiltinOptions_SubOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::DivOptionsT *AsDivOptions() { - return type == BuiltinOptions_DivOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::DivOptionsT *AsDivOptions() const { - return type == BuiltinOptions_DivOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SqueezeOptionsT *AsSqueezeOptions() { - return type == BuiltinOptions_SqueezeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SqueezeOptionsT *AsSqueezeOptions() const { - return type == BuiltinOptions_SqueezeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SequenceRNNOptionsT *AsSequenceRNNOptions() { - return type == BuiltinOptions_SequenceRNNOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SequenceRNNOptionsT *AsSequenceRNNOptions() const { - return type == BuiltinOptions_SequenceRNNOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::StridedSliceOptionsT *AsStridedSliceOptions() { - return type == BuiltinOptions_StridedSliceOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::StridedSliceOptionsT *AsStridedSliceOptions() const { - return type == BuiltinOptions_StridedSliceOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ExpOptionsT *AsExpOptions() { - return type == BuiltinOptions_ExpOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ExpOptionsT *AsExpOptions() const { - return type == BuiltinOptions_ExpOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::TopKV2OptionsT *AsTopKV2Options() { - return type == BuiltinOptions_TopKV2Options ? - reinterpret_cast(value) : nullptr; - } - const tflite::TopKV2OptionsT *AsTopKV2Options() const { - return type == BuiltinOptions_TopKV2Options ? - reinterpret_cast(value) : nullptr; - } - tflite::SplitOptionsT *AsSplitOptions() { - return type == BuiltinOptions_SplitOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SplitOptionsT *AsSplitOptions() const { - return type == BuiltinOptions_SplitOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LogSoftmaxOptionsT *AsLogSoftmaxOptions() { - return type == BuiltinOptions_LogSoftmaxOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LogSoftmaxOptionsT *AsLogSoftmaxOptions() const { - return type == BuiltinOptions_LogSoftmaxOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::CastOptionsT *AsCastOptions() { - return type == BuiltinOptions_CastOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::CastOptionsT *AsCastOptions() const { - return type == BuiltinOptions_CastOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::DequantizeOptionsT *AsDequantizeOptions() { - return type == BuiltinOptions_DequantizeOptions ? 
- reinterpret_cast(value) : nullptr; - } - const tflite::DequantizeOptionsT *AsDequantizeOptions() const { - return type == BuiltinOptions_DequantizeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::MaximumMinimumOptionsT *AsMaximumMinimumOptions() { - return type == BuiltinOptions_MaximumMinimumOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::MaximumMinimumOptionsT *AsMaximumMinimumOptions() const { - return type == BuiltinOptions_MaximumMinimumOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ArgMaxOptionsT *AsArgMaxOptions() { - return type == BuiltinOptions_ArgMaxOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ArgMaxOptionsT *AsArgMaxOptions() const { - return type == BuiltinOptions_ArgMaxOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LessOptionsT *AsLessOptions() { - return type == BuiltinOptions_LessOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LessOptionsT *AsLessOptions() const { - return type == BuiltinOptions_LessOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::NegOptionsT *AsNegOptions() { - return type == BuiltinOptions_NegOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::NegOptionsT *AsNegOptions() const { - return type == BuiltinOptions_NegOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::PadV2OptionsT *AsPadV2Options() { - return type == BuiltinOptions_PadV2Options ? - reinterpret_cast(value) : nullptr; - } - const tflite::PadV2OptionsT *AsPadV2Options() const { - return type == BuiltinOptions_PadV2Options ? - reinterpret_cast(value) : nullptr; - } - tflite::GreaterOptionsT *AsGreaterOptions() { - return type == BuiltinOptions_GreaterOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::GreaterOptionsT *AsGreaterOptions() const { - return type == BuiltinOptions_GreaterOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::GreaterEqualOptionsT *AsGreaterEqualOptions() { - return type == BuiltinOptions_GreaterEqualOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::GreaterEqualOptionsT *AsGreaterEqualOptions() const { - return type == BuiltinOptions_GreaterEqualOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LessEqualOptionsT *AsLessEqualOptions() { - return type == BuiltinOptions_LessEqualOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LessEqualOptionsT *AsLessEqualOptions() const { - return type == BuiltinOptions_LessEqualOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SelectOptionsT *AsSelectOptions() { - return type == BuiltinOptions_SelectOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SelectOptionsT *AsSelectOptions() const { - return type == BuiltinOptions_SelectOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SliceOptionsT *AsSliceOptions() { - return type == BuiltinOptions_SliceOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SliceOptionsT *AsSliceOptions() const { - return type == BuiltinOptions_SliceOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::TransposeConvOptionsT *AsTransposeConvOptions() { - return type == BuiltinOptions_TransposeConvOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::TransposeConvOptionsT *AsTransposeConvOptions() const { - return type == BuiltinOptions_TransposeConvOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SparseToDenseOptionsT *AsSparseToDenseOptions() { - return type == BuiltinOptions_SparseToDenseOptions ? 
- reinterpret_cast(value) : nullptr; - } - const tflite::SparseToDenseOptionsT *AsSparseToDenseOptions() const { - return type == BuiltinOptions_SparseToDenseOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::TileOptionsT *AsTileOptions() { - return type == BuiltinOptions_TileOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::TileOptionsT *AsTileOptions() const { - return type == BuiltinOptions_TileOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ExpandDimsOptionsT *AsExpandDimsOptions() { - return type == BuiltinOptions_ExpandDimsOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ExpandDimsOptionsT *AsExpandDimsOptions() const { - return type == BuiltinOptions_ExpandDimsOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::EqualOptionsT *AsEqualOptions() { - return type == BuiltinOptions_EqualOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::EqualOptionsT *AsEqualOptions() const { - return type == BuiltinOptions_EqualOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::NotEqualOptionsT *AsNotEqualOptions() { - return type == BuiltinOptions_NotEqualOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::NotEqualOptionsT *AsNotEqualOptions() const { - return type == BuiltinOptions_NotEqualOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ShapeOptionsT *AsShapeOptions() { - return type == BuiltinOptions_ShapeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ShapeOptionsT *AsShapeOptions() const { - return type == BuiltinOptions_ShapeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::PowOptionsT *AsPowOptions() { - return type == BuiltinOptions_PowOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::PowOptionsT *AsPowOptions() const { - return type == BuiltinOptions_PowOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ArgMinOptionsT *AsArgMinOptions() { - return type == BuiltinOptions_ArgMinOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ArgMinOptionsT *AsArgMinOptions() const { - return type == BuiltinOptions_ArgMinOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::FakeQuantOptionsT *AsFakeQuantOptions() { - return type == BuiltinOptions_FakeQuantOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::FakeQuantOptionsT *AsFakeQuantOptions() const { - return type == BuiltinOptions_FakeQuantOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::PackOptionsT *AsPackOptions() { - return type == BuiltinOptions_PackOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::PackOptionsT *AsPackOptions() const { - return type == BuiltinOptions_PackOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LogicalOrOptionsT *AsLogicalOrOptions() { - return type == BuiltinOptions_LogicalOrOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LogicalOrOptionsT *AsLogicalOrOptions() const { - return type == BuiltinOptions_LogicalOrOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::OneHotOptionsT *AsOneHotOptions() { - return type == BuiltinOptions_OneHotOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::OneHotOptionsT *AsOneHotOptions() const { - return type == BuiltinOptions_OneHotOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LogicalAndOptionsT *AsLogicalAndOptions() { - return type == BuiltinOptions_LogicalAndOptions ? 
- reinterpret_cast(value) : nullptr; - } - const tflite::LogicalAndOptionsT *AsLogicalAndOptions() const { - return type == BuiltinOptions_LogicalAndOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LogicalNotOptionsT *AsLogicalNotOptions() { - return type == BuiltinOptions_LogicalNotOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LogicalNotOptionsT *AsLogicalNotOptions() const { - return type == BuiltinOptions_LogicalNotOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::UnpackOptionsT *AsUnpackOptions() { - return type == BuiltinOptions_UnpackOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::UnpackOptionsT *AsUnpackOptions() const { - return type == BuiltinOptions_UnpackOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::FloorDivOptionsT *AsFloorDivOptions() { - return type == BuiltinOptions_FloorDivOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::FloorDivOptionsT *AsFloorDivOptions() const { - return type == BuiltinOptions_FloorDivOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SquareOptionsT *AsSquareOptions() { - return type == BuiltinOptions_SquareOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SquareOptionsT *AsSquareOptions() const { - return type == BuiltinOptions_SquareOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ZerosLikeOptionsT *AsZerosLikeOptions() { - return type == BuiltinOptions_ZerosLikeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ZerosLikeOptionsT *AsZerosLikeOptions() const { - return type == BuiltinOptions_ZerosLikeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::FillOptionsT *AsFillOptions() { - return type == BuiltinOptions_FillOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::FillOptionsT *AsFillOptions() const { - return type == BuiltinOptions_FillOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() { - return type == BuiltinOptions_BidirectionalSequenceLSTMOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() const { - return type == BuiltinOptions_BidirectionalSequenceLSTMOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() { - return type == BuiltinOptions_BidirectionalSequenceRNNOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() const { - return type == BuiltinOptions_BidirectionalSequenceRNNOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() { - return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() const { - return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::FloorModOptionsT *AsFloorModOptions() { - return type == BuiltinOptions_FloorModOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::FloorModOptionsT *AsFloorModOptions() const { - return type == BuiltinOptions_FloorModOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::RangeOptionsT *AsRangeOptions() { - return type == BuiltinOptions_RangeOptions ? 
- reinterpret_cast(value) : nullptr; - } - const tflite::RangeOptionsT *AsRangeOptions() const { - return type == BuiltinOptions_RangeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() { - return type == BuiltinOptions_ResizeNearestNeighborOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() const { - return type == BuiltinOptions_ResizeNearestNeighborOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LeakyReluOptionsT *AsLeakyReluOptions() { - return type == BuiltinOptions_LeakyReluOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LeakyReluOptionsT *AsLeakyReluOptions() const { - return type == BuiltinOptions_LeakyReluOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() { - return type == BuiltinOptions_SquaredDifferenceOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() const { - return type == BuiltinOptions_SquaredDifferenceOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::MirrorPadOptionsT *AsMirrorPadOptions() { - return type == BuiltinOptions_MirrorPadOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::MirrorPadOptionsT *AsMirrorPadOptions() const { - return type == BuiltinOptions_MirrorPadOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::AbsOptionsT *AsAbsOptions() { - return type == BuiltinOptions_AbsOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::AbsOptionsT *AsAbsOptions() const { - return type == BuiltinOptions_AbsOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SplitVOptionsT *AsSplitVOptions() { - return type == BuiltinOptions_SplitVOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SplitVOptionsT *AsSplitVOptions() const { - return type == BuiltinOptions_SplitVOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::UniqueOptionsT *AsUniqueOptions() { - return type == BuiltinOptions_UniqueOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::UniqueOptionsT *AsUniqueOptions() const { - return type == BuiltinOptions_UniqueOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ReverseV2OptionsT *AsReverseV2Options() { - return type == BuiltinOptions_ReverseV2Options ? - reinterpret_cast(value) : nullptr; - } - const tflite::ReverseV2OptionsT *AsReverseV2Options() const { - return type == BuiltinOptions_ReverseV2Options ? - reinterpret_cast(value) : nullptr; - } - tflite::AddNOptionsT *AsAddNOptions() { - return type == BuiltinOptions_AddNOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::AddNOptionsT *AsAddNOptions() const { - return type == BuiltinOptions_AddNOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::GatherNdOptionsT *AsGatherNdOptions() { - return type == BuiltinOptions_GatherNdOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::GatherNdOptionsT *AsGatherNdOptions() const { - return type == BuiltinOptions_GatherNdOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::CosOptionsT *AsCosOptions() { - return type == BuiltinOptions_CosOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::CosOptionsT *AsCosOptions() const { - return type == BuiltinOptions_CosOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::WhereOptionsT *AsWhereOptions() { - return type == BuiltinOptions_WhereOptions ? 
- reinterpret_cast(value) : nullptr; - } - const tflite::WhereOptionsT *AsWhereOptions() const { - return type == BuiltinOptions_WhereOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::RankOptionsT *AsRankOptions() { - return type == BuiltinOptions_RankOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::RankOptionsT *AsRankOptions() const { - return type == BuiltinOptions_RankOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ReverseSequenceOptionsT *AsReverseSequenceOptions() { - return type == BuiltinOptions_ReverseSequenceOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ReverseSequenceOptionsT *AsReverseSequenceOptions() const { - return type == BuiltinOptions_ReverseSequenceOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::MatrixDiagOptionsT *AsMatrixDiagOptions() { - return type == BuiltinOptions_MatrixDiagOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::MatrixDiagOptionsT *AsMatrixDiagOptions() const { - return type == BuiltinOptions_MatrixDiagOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::QuantizeOptionsT *AsQuantizeOptions() { - return type == BuiltinOptions_QuantizeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::QuantizeOptionsT *AsQuantizeOptions() const { - return type == BuiltinOptions_QuantizeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() { - return type == BuiltinOptions_MatrixSetDiagOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() const { - return type == BuiltinOptions_MatrixSetDiagOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::HardSwishOptionsT *AsHardSwishOptions() { - return type == BuiltinOptions_HardSwishOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::HardSwishOptionsT *AsHardSwishOptions() const { - return type == BuiltinOptions_HardSwishOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::IfOptionsT *AsIfOptions() { - return type == BuiltinOptions_IfOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::IfOptionsT *AsIfOptions() const { - return type == BuiltinOptions_IfOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::WhileOptionsT *AsWhileOptions() { - return type == BuiltinOptions_WhileOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::WhileOptionsT *AsWhileOptions() const { - return type == BuiltinOptions_WhileOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::DepthToSpaceOptionsT *AsDepthToSpaceOptions() { - return type == BuiltinOptions_DepthToSpaceOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::DepthToSpaceOptionsT *AsDepthToSpaceOptions() const { - return type == BuiltinOptions_DepthToSpaceOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() { - return type == BuiltinOptions_NonMaxSuppressionV4Options ? - reinterpret_cast(value) : nullptr; - } - const tflite::NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() const { - return type == BuiltinOptions_NonMaxSuppressionV4Options ? - reinterpret_cast(value) : nullptr; - } - tflite::NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() { - return type == BuiltinOptions_NonMaxSuppressionV5Options ? - reinterpret_cast(value) : nullptr; - } - const tflite::NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() const { - return type == BuiltinOptions_NonMaxSuppressionV5Options ? 
- reinterpret_cast(value) : nullptr; - } - tflite::ScatterNdOptionsT *AsScatterNdOptions() { - return type == BuiltinOptions_ScatterNdOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ScatterNdOptionsT *AsScatterNdOptions() const { - return type == BuiltinOptions_ScatterNdOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SelectV2OptionsT *AsSelectV2Options() { - return type == BuiltinOptions_SelectV2Options ? - reinterpret_cast(value) : nullptr; - } - const tflite::SelectV2OptionsT *AsSelectV2Options() const { - return type == BuiltinOptions_SelectV2Options ? - reinterpret_cast(value) : nullptr; - } - tflite::DensifyOptionsT *AsDensifyOptions() { - return type == BuiltinOptions_DensifyOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::DensifyOptionsT *AsDensifyOptions() const { - return type == BuiltinOptions_DensifyOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SegmentSumOptionsT *AsSegmentSumOptions() { - return type == BuiltinOptions_SegmentSumOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SegmentSumOptionsT *AsSegmentSumOptions() const { - return type == BuiltinOptions_SegmentSumOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::BatchMatMulOptionsT *AsBatchMatMulOptions() { - return type == BuiltinOptions_BatchMatMulOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::BatchMatMulOptionsT *AsBatchMatMulOptions() const { - return type == BuiltinOptions_BatchMatMulOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::CumsumOptionsT *AsCumsumOptions() { - return type == BuiltinOptions_CumsumOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::CumsumOptionsT *AsCumsumOptions() const { - return type == BuiltinOptions_CumsumOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::CallOnceOptionsT *AsCallOnceOptions() { - return type == BuiltinOptions_CallOnceOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::CallOnceOptionsT *AsCallOnceOptions() const { - return type == BuiltinOptions_CallOnceOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::BroadcastToOptionsT *AsBroadcastToOptions() { - return type == BuiltinOptions_BroadcastToOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::BroadcastToOptionsT *AsBroadcastToOptions() const { - return type == BuiltinOptions_BroadcastToOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::Rfft2dOptionsT *AsRfft2dOptions() { - return type == BuiltinOptions_Rfft2dOptions - ? reinterpret_cast(value) - : nullptr; - } - const tflite::Rfft2dOptionsT *AsRfft2dOptions() const { - return type == BuiltinOptions_Rfft2dOptions - ? 
[Removed (continued): the close of the union, the VerifyBuiltinOptions()/VerifyBuiltinOptionsVector() declarations, and the schema enums with their generated EnumValues*/EnumNames*/EnumName* helpers: Padding (SAME, VALID), ActivationFunctionType (NONE, RELU, RELU_N1_TO_1, RELU6, TANH, SIGN_BIT), LSHProjectionType (UNKNOWN, SPARSE, DENSE), FullyConnectedOptionsWeightsFormat (DEFAULT, SHUFFLED4x16INT8), LSTMKernelType (FULL, BASIC), CombinerType (SUM, MEAN, SQRTN), MirrorPadMode (REFLECT, SYMMETRIC) and CustomOptionsFormat (FLEXBUFFERS).]
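For orientation, a minimal sketch of how calling code typically uses the generated enum helpers named above; the include path and the wrapper function are assumptions, not part of this diff.

#include <cstdio>
#include "schema_generated.h"  // assumed include path for the deleted header

// Prints a human-readable name for a fused-activation value; the generated
// EnumName* helpers return "" for out-of-range inputs.
static void PrintActivation(tflite::ActivationFunctionType act) {
  std::printf("fused_activation_function = %s\n",
              tflite::EnumNameActivationFunctionType(act));
}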
[Removed (continued): the table definitions that follow, each consisting of a NativeTable struct (*T), the packed table with VT_* vtable offsets, field accessors and Verify(), a *Builder, and Create*/Create*Direct helpers: CustomQuantization (a custom byte vector, force-aligned to 16 in the *Direct helper), QuantizationParameters (min, max, scale and zero_point vectors, a details union whose only member is CustomQuantization, and quantized_dimension) and Int32Vector (values).]
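A hedged sketch of the write-side API summarised above. The numeric values are invented, and zero_point being int64_t follows the stock TFLite schema rather than anything visible in this hunk.

#include <cstdint>
#include <vector>
#include "flatbuffers/flatbuffers.h"
#include "schema_generated.h"  // assumed include path

// Builds per-tensor quantization parameters with the generated *Direct helper.
static flatbuffers::Offset<tflite::QuantizationParameters> BuildQuant(
    flatbuffers::FlatBufferBuilder &fbb) {
  std::vector<float> scale = {0.0039f};
  std::vector<int64_t> zero_point = {128};
  // min/max are optional and omitted; details and quantized_dimension keep
  // their declared defaults.
  return tflite::CreateQuantizationParametersDirect(
      fbb, /*min=*/nullptr, /*max=*/nullptr, &scale, &zero_point);
}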
[Removed (continued): the same generated pattern for Uint16Vector and Uint8Vector (values vectors, force-aligned to 4 in the *Direct helpers) and for DimensionMetadata (format, dense_size, and the array_segments/array_indices SparseIndexVector unions with typed *_as_Int32Vector/Uint16Vector/Uint8Vector accessors and their template specialisations).]
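The typed union accessors summarised above follow the usual generated pattern: each *_as_X() returns nullptr unless the stored type tag matches. A hedged read-side sketch; the helper name and fallback value are assumptions, and it assumes the values vector is present.

#include <cstdint>
#include "schema_generated.h"  // assumed include path

// Returns the first array_segments entry of a sparse dimension, or -1 if the
// union holds no recognised vector type.
static int32_t FirstSegment(const tflite::DimensionMetadata &dm) {
  if (const tflite::Int32Vector *v = dm.array_segments_as_Int32Vector()) {
    return v->values()->Get(0);
  }
  if (const tflite::Uint16Vector *v = dm.array_segments_as_Uint16Vector()) {
    return v->values()->Get(0);
  }
  if (const tflite::Uint8Vector *v = dm.array_segments_as_Uint8Vector()) {
    return v->values()->Get(0);
  }
  return -1;
}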
[Removed (continued): the generated code for SparsityParameters (traversal_order, block_map, and a dim_metadata vector of DimensionMetadata tables) and Tensor (shape, type, buffer, name, quantization, is_variable, sparsity, shape_signature), again with NativeTable, Builder and Create*/Create*Direct helpers.]
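A hedged sketch of CreateTensorDirect as declared above; the tensor name, shape and the use of buffer index 0 are illustrative assumptions.

#include <cstdint>
#include <vector>
#include "flatbuffers/flatbuffers.h"
#include "schema_generated.h"  // assumed include path

// Describes a float tensor; the trailing parameters (is_variable, sparsity,
// shape_signature) keep their declared defaults.
static flatbuffers::Offset<tflite::Tensor> BuildInputTensor(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<tflite::QuantizationParameters> quant) {
  std::vector<int32_t> shape = {1, 28, 28, 1};
  return tflite::CreateTensorDirect(
      fbb, &shape, tflite::TensorType_FLOAT32, /*buffer=*/0,
      /*name=*/"input", quant);
}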
[Removed (continued): the generated code for the per-operator option tables Conv2DOptions (padding, stride_w/stride_h, fused_activation_function, dilation_w_factor/dilation_h_factor defaulting to 1), Pool2DOptions (padding, stride_w/stride_h, filter_width/filter_height, fused_activation_function) and DepthwiseConv2DOptions (padding, stride_w/stride_h, depth_multiplier, fused_activation_function, dilation_w_factor/dilation_h_factor).]
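A hedged sketch contrasting the two write paths summarised above for Conv2DOptions: the direct Create* helper and the object API (*T struct plus Pack). The concrete stride and padding values are invented.

#include "flatbuffers/flatbuffers.h"
#include "schema_generated.h"  // assumed include path

static flatbuffers::Offset<tflite::Conv2DOptions> BuildConvOptions(
    flatbuffers::FlatBufferBuilder &fbb) {
  // Direct path: unspecified fields keep their schema defaults
  // (dilation factors default to 1).
  return tflite::CreateConv2DOptions(
      fbb, tflite::Padding_VALID, /*stride_w=*/2, /*stride_h=*/2,
      tflite::ActivationFunctionType_RELU);
}

static flatbuffers::Offset<tflite::Conv2DOptions> BuildConvOptionsObjectApi(
    flatbuffers::FlatBufferBuilder &fbb) {
  // Object-API path: fill the NativeTable and let the generated Pack() emit it.
  tflite::Conv2DOptionsT opts;
  opts.padding = tflite::Padding_VALID;
  opts.stride_w = 2;
  opts.stride_h = 2;
  opts.fused_activation_function = tflite::ActivationFunctionType_RELU;
  return tflite::Conv2DOptions::Pack(fbb, &opts);
}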
[Removed (continued): the generated code for ConcatEmbeddingsOptions (num_channels, num_columns_per_channel, embedding_dim_per_channel), LSHProjectionOptions (type), SVDFOptions (rank, fused_activation_function, asymmetric_quantize_inputs), RNNOptions (fused_activation_function, asymmetric_quantize_inputs) and SequenceRNNOptions (time_major, fused_activation_function, asymmetric_quantize_inputs), whose builder continues below.]
static_cast(asymmetric_quantize_inputs), 0); - } - explicit SequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SequenceRNNOptionsBuilder &operator=(const SequenceRNNOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSequenceRNNOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool time_major = false, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool asymmetric_quantize_inputs = false) { - SequenceRNNOptionsBuilder builder_(_fbb); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_fused_activation_function(fused_activation_function); - builder_.add_time_major(time_major); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BidirectionalSequenceRNNOptionsT : public flatbuffers::NativeTable { - typedef BidirectionalSequenceRNNOptions TableType; - bool time_major; - tflite::ActivationFunctionType fused_activation_function; - bool merge_outputs; - bool asymmetric_quantize_inputs; - BidirectionalSequenceRNNOptionsT() - : time_major(false), - fused_activation_function(tflite::ActivationFunctionType_NONE), - merge_outputs(false), - asymmetric_quantize_inputs(false) { - } -}; - -struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BidirectionalSequenceRNNOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TIME_MAJOR = 4, - VT_FUSED_ACTIVATION_FUNCTION = 6, - VT_MERGE_OUTPUTS = 8, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 10 - }; - bool time_major() const { - return GetField(VT_TIME_MAJOR, 0) != 0; - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool merge_outputs() const { - return GetField(VT_MERGE_OUTPUTS, 0) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TIME_MAJOR) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_MERGE_OUTPUTS) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - BidirectionalSequenceRNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BidirectionalSequenceRNNOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_time_major(bool time_major) { - fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_TIME_MAJOR, static_cast(time_major), 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_merge_outputs(bool 
merge_outputs) { - fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_MERGE_OUTPUTS, static_cast(merge_outputs), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit BidirectionalSequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BidirectionalSequenceRNNOptionsBuilder &operator=(const BidirectionalSequenceRNNOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateBidirectionalSequenceRNNOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool time_major = false, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool merge_outputs = false, - bool asymmetric_quantize_inputs = false) { - BidirectionalSequenceRNNOptionsBuilder builder_(_fbb); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_merge_outputs(merge_outputs); - builder_.add_fused_activation_function(fused_activation_function); - builder_.add_time_major(time_major); - return builder_.Finish(); -} - -flatbuffers::Offset CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct FullyConnectedOptionsT : public flatbuffers::NativeTable { - typedef FullyConnectedOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - tflite::FullyConnectedOptionsWeightsFormat weights_format; - bool keep_num_dims; - bool asymmetric_quantize_inputs; - FullyConnectedOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - weights_format(tflite::FullyConnectedOptionsWeightsFormat_DEFAULT), - keep_num_dims(false), - asymmetric_quantize_inputs(false) { - } -}; - -struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef FullyConnectedOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_WEIGHTS_FORMAT = 6, - VT_KEEP_NUM_DIMS = 8, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 10 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - tflite::FullyConnectedOptionsWeightsFormat weights_format() const { - return static_cast(GetField(VT_WEIGHTS_FORMAT, 0)); - } - bool keep_num_dims() const { - return GetField(VT_KEEP_NUM_DIMS, 0) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_WEIGHTS_FORMAT) && - VerifyField(verifier, VT_KEEP_NUM_DIMS) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - FullyConnectedOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct 
FullyConnectedOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_weights_format(tflite::FullyConnectedOptionsWeightsFormat weights_format) { - fbb_.AddElement(FullyConnectedOptions::VT_WEIGHTS_FORMAT, static_cast(weights_format), 0); - } - void add_keep_num_dims(bool keep_num_dims) { - fbb_.AddElement(FullyConnectedOptions::VT_KEEP_NUM_DIMS, static_cast(keep_num_dims), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(FullyConnectedOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - FullyConnectedOptionsBuilder &operator=(const FullyConnectedOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateFullyConnectedOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - tflite::FullyConnectedOptionsWeightsFormat weights_format = tflite::FullyConnectedOptionsWeightsFormat_DEFAULT, - bool keep_num_dims = false, - bool asymmetric_quantize_inputs = false) { - FullyConnectedOptionsBuilder builder_(_fbb); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_keep_num_dims(keep_num_dims); - builder_.add_weights_format(weights_format); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SoftmaxOptionsT : public flatbuffers::NativeTable { - typedef SoftmaxOptions TableType; - float beta; - SoftmaxOptionsT() - : beta(0.0f) { - } -}; - -struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SoftmaxOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BETA = 4 - }; - float beta() const { - return GetField(VT_BETA, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BETA) && - verifier.EndTable(); - } - SoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SoftmaxOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_beta(float beta) { - fbb_.AddElement(SoftmaxOptions::VT_BETA, beta, 0.0f); - } - explicit SoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SoftmaxOptionsBuilder &operator=(const SoftmaxOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline 
flatbuffers::Offset CreateSoftmaxOptions( - flatbuffers::FlatBufferBuilder &_fbb, - float beta = 0.0f) { - SoftmaxOptionsBuilder builder_(_fbb); - builder_.add_beta(beta); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ConcatenationOptionsT : public flatbuffers::NativeTable { - typedef ConcatenationOptions TableType; - int32_t axis; - tflite::ActivationFunctionType fused_activation_function; - ConcatenationOptionsT() - : axis(0), - fused_activation_function(tflite::ActivationFunctionType_NONE) { - } -}; - -struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ConcatenationOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_AXIS = 4, - VT_FUSED_ACTIVATION_FUNCTION = 6 - }; - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_AXIS) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - verifier.EndTable(); - } - ConcatenationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ConcatenationOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_axis(int32_t axis) { - fbb_.AddElement(ConcatenationOptions::VT_AXIS, axis, 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - explicit ConcatenationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ConcatenationOptionsBuilder &operator=(const ConcatenationOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateConcatenationOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 0, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - ConcatenationOptionsBuilder builder_(_fbb); - builder_.add_axis(axis); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct AddOptionsT : public flatbuffers::NativeTable { - typedef AddOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - bool pot_scale_int16; - AddOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - pot_scale_int16(true) { - } -}; - -struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef AddOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - 
VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_POT_SCALE_INT16 = 6 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool pot_scale_int16() const { - return GetField(VT_POT_SCALE_INT16, 1) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_POT_SCALE_INT16) && - verifier.EndTable(); - } - AddOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct AddOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(AddOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_pot_scale_int16(bool pot_scale_int16) { - fbb_.AddElement(AddOptions::VT_POT_SCALE_INT16, static_cast(pot_scale_int16), 1); - } - explicit AddOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - AddOptionsBuilder &operator=(const AddOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateAddOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool pot_scale_int16 = true) { - AddOptionsBuilder builder_(_fbb); - builder_.add_pot_scale_int16(pot_scale_int16); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct MulOptionsT : public flatbuffers::NativeTable { - typedef MulOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - MulOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE) { - } -}; - -struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MulOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - verifier.EndTable(); - } - MulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct MulOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - 
fbb_.AddElement(MulOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - explicit MulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MulOptionsBuilder &operator=(const MulOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateMulOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - MulOptionsBuilder builder_(_fbb); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct L2NormOptionsT : public flatbuffers::NativeTable { - typedef L2NormOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - L2NormOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE) { - } -}; - -struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef L2NormOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - verifier.EndTable(); - } - L2NormOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct L2NormOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - explicit L2NormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - L2NormOptionsBuilder &operator=(const L2NormOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateL2NormOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - L2NormOptionsBuilder builder_(_fbb); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LocalResponseNormalizationOptionsT : public flatbuffers::NativeTable { - typedef LocalResponseNormalizationOptions TableType; - int32_t radius; - float bias; - float alpha; - float beta; - LocalResponseNormalizationOptionsT() - : radius(0), - bias(0.0f), - alpha(0.0f), - beta(0.0f) { - } -}; - -struct LocalResponseNormalizationOptions 
FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LocalResponseNormalizationOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_RADIUS = 4, - VT_BIAS = 6, - VT_ALPHA = 8, - VT_BETA = 10 - }; - int32_t radius() const { - return GetField(VT_RADIUS, 0); - } - float bias() const { - return GetField(VT_BIAS, 0.0f); - } - float alpha() const { - return GetField(VT_ALPHA, 0.0f); - } - float beta() const { - return GetField(VT_BETA, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_RADIUS) && - VerifyField(verifier, VT_BIAS) && - VerifyField(verifier, VT_ALPHA) && - VerifyField(verifier, VT_BETA) && - verifier.EndTable(); - } - LocalResponseNormalizationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LocalResponseNormalizationOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_radius(int32_t radius) { - fbb_.AddElement(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0); - } - void add_bias(float bias) { - fbb_.AddElement(LocalResponseNormalizationOptions::VT_BIAS, bias, 0.0f); - } - void add_alpha(float alpha) { - fbb_.AddElement(LocalResponseNormalizationOptions::VT_ALPHA, alpha, 0.0f); - } - void add_beta(float beta) { - fbb_.AddElement(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f); - } - explicit LocalResponseNormalizationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LocalResponseNormalizationOptionsBuilder &operator=(const LocalResponseNormalizationOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLocalResponseNormalizationOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t radius = 0, - float bias = 0.0f, - float alpha = 0.0f, - float beta = 0.0f) { - LocalResponseNormalizationOptionsBuilder builder_(_fbb); - builder_.add_beta(beta); - builder_.add_alpha(alpha); - builder_.add_bias(bias); - builder_.add_radius(radius); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LSTMOptionsT : public flatbuffers::NativeTable { - typedef LSTMOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - float cell_clip; - float proj_clip; - tflite::LSTMKernelType kernel_type; - bool asymmetric_quantize_inputs; - LSTMOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - cell_clip(0.0f), - proj_clip(0.0f), - kernel_type(tflite::LSTMKernelType_FULL), - asymmetric_quantize_inputs(false) { - } -}; - -struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LSTMOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_CELL_CLIP = 6, - VT_PROJ_CLIP = 8, - VT_KERNEL_TYPE = 10, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 12 - 
}; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - float cell_clip() const { - return GetField(VT_CELL_CLIP, 0.0f); - } - float proj_clip() const { - return GetField(VT_PROJ_CLIP, 0.0f); - } - tflite::LSTMKernelType kernel_type() const { - return static_cast(GetField(VT_KERNEL_TYPE, 0)); - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_CELL_CLIP) && - VerifyField(verifier, VT_PROJ_CLIP) && - VerifyField(verifier, VT_KERNEL_TYPE) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - LSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LSTMOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_cell_clip(float cell_clip) { - fbb_.AddElement(LSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); - } - void add_proj_clip(float proj_clip) { - fbb_.AddElement(LSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); - } - void add_kernel_type(tflite::LSTMKernelType kernel_type) { - fbb_.AddElement(LSTMOptions::VT_KERNEL_TYPE, static_cast(kernel_type), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(LSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit LSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LSTMOptionsBuilder &operator=(const LSTMOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLSTMOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - float cell_clip = 0.0f, - float proj_clip = 0.0f, - tflite::LSTMKernelType kernel_type = tflite::LSTMKernelType_FULL, - bool asymmetric_quantize_inputs = false) { - LSTMOptionsBuilder builder_(_fbb); - builder_.add_proj_clip(proj_clip); - builder_.add_cell_clip(cell_clip); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_kernel_type(kernel_type); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct UnidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable { - typedef UnidirectionalSequenceLSTMOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - float cell_clip; - float proj_clip; - bool time_major; - bool asymmetric_quantize_inputs; - UnidirectionalSequenceLSTMOptionsT() - : 
fused_activation_function(tflite::ActivationFunctionType_NONE), - cell_clip(0.0f), - proj_clip(0.0f), - time_major(false), - asymmetric_quantize_inputs(false) { - } -}; - -struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef UnidirectionalSequenceLSTMOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_CELL_CLIP = 6, - VT_PROJ_CLIP = 8, - VT_TIME_MAJOR = 10, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 12 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - float cell_clip() const { - return GetField(VT_CELL_CLIP, 0.0f); - } - float proj_clip() const { - return GetField(VT_PROJ_CLIP, 0.0f); - } - bool time_major() const { - return GetField(VT_TIME_MAJOR, 0) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_CELL_CLIP) && - VerifyField(verifier, VT_PROJ_CLIP) && - VerifyField(verifier, VT_TIME_MAJOR) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - UnidirectionalSequenceLSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct UnidirectionalSequenceLSTMOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_cell_clip(float cell_clip) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); - } - void add_proj_clip(float proj_clip) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); - } - void add_time_major(bool time_major) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, static_cast(time_major), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit UnidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - UnidirectionalSequenceLSTMOptionsBuilder &operator=(const UnidirectionalSequenceLSTMOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - float cell_clip = 0.0f, - float proj_clip = 0.0f, - bool time_major = false, - bool asymmetric_quantize_inputs = false) { - UnidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); - builder_.add_proj_clip(proj_clip); - 
builder_.add_cell_clip(cell_clip); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_time_major(time_major); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable { - typedef BidirectionalSequenceLSTMOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - float cell_clip; - float proj_clip; - bool merge_outputs; - bool time_major; - bool asymmetric_quantize_inputs; - BidirectionalSequenceLSTMOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - cell_clip(0.0f), - proj_clip(0.0f), - merge_outputs(false), - time_major(true), - asymmetric_quantize_inputs(false) { - } -}; - -struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BidirectionalSequenceLSTMOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_CELL_CLIP = 6, - VT_PROJ_CLIP = 8, - VT_MERGE_OUTPUTS = 10, - VT_TIME_MAJOR = 12, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 14 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - float cell_clip() const { - return GetField(VT_CELL_CLIP, 0.0f); - } - float proj_clip() const { - return GetField(VT_PROJ_CLIP, 0.0f); - } - bool merge_outputs() const { - return GetField(VT_MERGE_OUTPUTS, 0) != 0; - } - bool time_major() const { - return GetField(VT_TIME_MAJOR, 1) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_CELL_CLIP) && - VerifyField(verifier, VT_PROJ_CLIP) && - VerifyField(verifier, VT_MERGE_OUTPUTS) && - VerifyField(verifier, VT_TIME_MAJOR) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - BidirectionalSequenceLSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BidirectionalSequenceLSTMOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_cell_clip(float cell_clip) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); - } - void add_proj_clip(float proj_clip) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); - } - void add_merge_outputs(bool merge_outputs) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_MERGE_OUTPUTS, static_cast(merge_outputs), 0); - } - void add_time_major(bool time_major) { - 
fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, static_cast(time_major), 1); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit BidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BidirectionalSequenceLSTMOptionsBuilder &operator=(const BidirectionalSequenceLSTMOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - float cell_clip = 0.0f, - float proj_clip = 0.0f, - bool merge_outputs = false, - bool time_major = true, - bool asymmetric_quantize_inputs = false) { - BidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); - builder_.add_proj_clip(proj_clip); - builder_.add_cell_clip(cell_clip); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_time_major(time_major); - builder_.add_merge_outputs(merge_outputs); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ResizeBilinearOptionsT : public flatbuffers::NativeTable { - typedef ResizeBilinearOptions TableType; - bool align_corners; - bool half_pixel_centers; - ResizeBilinearOptionsT() - : align_corners(false), - half_pixel_centers(false) { - } -}; - -struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ResizeBilinearOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ALIGN_CORNERS = 8, - VT_HALF_PIXEL_CENTERS = 10 - }; - bool align_corners() const { - return GetField(VT_ALIGN_CORNERS, 0) != 0; - } - bool half_pixel_centers() const { - return GetField(VT_HALF_PIXEL_CENTERS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ALIGN_CORNERS) && - VerifyField(verifier, VT_HALF_PIXEL_CENTERS) && - verifier.EndTable(); - } - ResizeBilinearOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ResizeBilinearOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_align_corners(bool align_corners) { - fbb_.AddElement(ResizeBilinearOptions::VT_ALIGN_CORNERS, static_cast(align_corners), 0); - } - void add_half_pixel_centers(bool half_pixel_centers) { - fbb_.AddElement(ResizeBilinearOptions::VT_HALF_PIXEL_CENTERS, static_cast(half_pixel_centers), 0); - } - explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ResizeBilinearOptionsBuilder &operator=(const ResizeBilinearOptionsBuilder &); - 
flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateResizeBilinearOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool align_corners = false, - bool half_pixel_centers = false) { - ResizeBilinearOptionsBuilder builder_(_fbb); - builder_.add_half_pixel_centers(half_pixel_centers); - builder_.add_align_corners(align_corners); - return builder_.Finish(); -} - -flatbuffers::Offset CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ResizeNearestNeighborOptionsT : public flatbuffers::NativeTable { - typedef ResizeNearestNeighborOptions TableType; - bool align_corners; - bool half_pixel_centers; - ResizeNearestNeighborOptionsT() - : align_corners(false), - half_pixel_centers(false) { - } -}; - -struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ResizeNearestNeighborOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ALIGN_CORNERS = 4, - VT_HALF_PIXEL_CENTERS = 6 - }; - bool align_corners() const { - return GetField(VT_ALIGN_CORNERS, 0) != 0; - } - bool half_pixel_centers() const { - return GetField(VT_HALF_PIXEL_CENTERS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ALIGN_CORNERS) && - VerifyField(verifier, VT_HALF_PIXEL_CENTERS) && - verifier.EndTable(); - } - ResizeNearestNeighborOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ResizeNearestNeighborOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ResizeNearestNeighborOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_align_corners(bool align_corners) { - fbb_.AddElement(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS, static_cast(align_corners), 0); - } - void add_half_pixel_centers(bool half_pixel_centers) { - fbb_.AddElement(ResizeNearestNeighborOptions::VT_HALF_PIXEL_CENTERS, static_cast(half_pixel_centers), 0); - } - explicit ResizeNearestNeighborOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ResizeNearestNeighborOptionsBuilder &operator=(const ResizeNearestNeighborOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateResizeNearestNeighborOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool align_corners = false, - bool half_pixel_centers = false) { - ResizeNearestNeighborOptionsBuilder builder_(_fbb); - builder_.add_half_pixel_centers(half_pixel_centers); - builder_.add_align_corners(align_corners); - return builder_.Finish(); -} - -flatbuffers::Offset CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct CallOptionsT : public flatbuffers::NativeTable { - typedef CallOptions TableType; - uint32_t subgraph; - CallOptionsT() - : subgraph(0) { - } -}; - -struct CallOptions 
FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CallOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SUBGRAPH = 4 - }; - uint32_t subgraph() const { - return GetField(VT_SUBGRAPH, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_SUBGRAPH) && - verifier.EndTable(); - } - CallOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct CallOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_subgraph(uint32_t subgraph) { - fbb_.AddElement(CallOptions::VT_SUBGRAPH, subgraph, 0); - } - explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - CallOptionsBuilder &operator=(const CallOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateCallOptions( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t subgraph = 0) { - CallOptionsBuilder builder_(_fbb); - builder_.add_subgraph(subgraph); - return builder_.Finish(); -} - -flatbuffers::Offset CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PadOptionsT : public flatbuffers::NativeTable { - typedef PadOptions TableType; - PadOptionsT() { - } -}; - -struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PadOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - PadOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PadOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit PadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - PadOptionsBuilder &operator=(const PadOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreatePadOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - PadOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PadV2OptionsT : public flatbuffers::NativeTable { - typedef PadV2Options TableType; - PadV2OptionsT() { - } -}; - -struct PadV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PadV2OptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - PadV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = 
nullptr) const; - void UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PadV2OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit PadV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - PadV2OptionsBuilder &operator=(const PadV2OptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreatePadV2Options( - flatbuffers::FlatBufferBuilder &_fbb) { - PadV2OptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ReshapeOptionsT : public flatbuffers::NativeTable { - typedef ReshapeOptions TableType; - std::vector new_shape; - ReshapeOptionsT() { - } -}; - -struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ReshapeOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NEW_SHAPE = 4 - }; - const flatbuffers::Vector *new_shape() const { - return GetPointer *>(VT_NEW_SHAPE); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_NEW_SHAPE) && - verifier.VerifyVector(new_shape()) && - verifier.EndTable(); - } - ReshapeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ReshapeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_new_shape(flatbuffers::Offset> new_shape) { - fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape); - } - explicit ReshapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ReshapeOptionsBuilder &operator=(const ReshapeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateReshapeOptions( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> new_shape = 0) { - ReshapeOptionsBuilder builder_(_fbb); - builder_.add_new_shape(new_shape); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateReshapeOptionsDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *new_shape = nullptr) { - auto new_shape__ = new_shape ? 
_fbb.CreateVector(*new_shape) : 0; - return tflite::CreateReshapeOptions( - _fbb, - new_shape__); -} - -flatbuffers::Offset CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SpaceToBatchNDOptionsT : public flatbuffers::NativeTable { - typedef SpaceToBatchNDOptions TableType; - SpaceToBatchNDOptionsT() { - } -}; - -struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SpaceToBatchNDOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SpaceToBatchNDOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SpaceToBatchNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SpaceToBatchNDOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SpaceToBatchNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SpaceToBatchNDOptionsBuilder &operator=(const SpaceToBatchNDOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSpaceToBatchNDOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SpaceToBatchNDOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BatchToSpaceNDOptionsT : public flatbuffers::NativeTable { - typedef BatchToSpaceNDOptions TableType; - BatchToSpaceNDOptionsT() { - } -}; - -struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BatchToSpaceNDOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - BatchToSpaceNDOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BatchToSpaceNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BatchToSpaceNDOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit BatchToSpaceNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BatchToSpaceNDOptionsBuilder &operator=(const BatchToSpaceNDOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateBatchToSpaceNDOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - BatchToSpaceNDOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SkipGramOptionsT : public 
flatbuffers::NativeTable { - typedef SkipGramOptions TableType; - int32_t ngram_size; - int32_t max_skip_size; - bool include_all_ngrams; - SkipGramOptionsT() - : ngram_size(0), - max_skip_size(0), - include_all_ngrams(false) { - } -}; - -struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SkipGramOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NGRAM_SIZE = 4, - VT_MAX_SKIP_SIZE = 6, - VT_INCLUDE_ALL_NGRAMS = 8 - }; - int32_t ngram_size() const { - return GetField(VT_NGRAM_SIZE, 0); - } - int32_t max_skip_size() const { - return GetField(VT_MAX_SKIP_SIZE, 0); - } - bool include_all_ngrams() const { - return GetField(VT_INCLUDE_ALL_NGRAMS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NGRAM_SIZE) && - VerifyField(verifier, VT_MAX_SKIP_SIZE) && - VerifyField(verifier, VT_INCLUDE_ALL_NGRAMS) && - verifier.EndTable(); - } - SkipGramOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SkipGramOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_ngram_size(int32_t ngram_size) { - fbb_.AddElement(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0); - } - void add_max_skip_size(int32_t max_skip_size) { - fbb_.AddElement(SkipGramOptions::VT_MAX_SKIP_SIZE, max_skip_size, 0); - } - void add_include_all_ngrams(bool include_all_ngrams) { - fbb_.AddElement(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS, static_cast(include_all_ngrams), 0); - } - explicit SkipGramOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SkipGramOptionsBuilder &operator=(const SkipGramOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSkipGramOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t ngram_size = 0, - int32_t max_skip_size = 0, - bool include_all_ngrams = false) { - SkipGramOptionsBuilder builder_(_fbb); - builder_.add_max_skip_size(max_skip_size); - builder_.add_ngram_size(ngram_size); - builder_.add_include_all_ngrams(include_all_ngrams); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SpaceToDepthOptionsT : public flatbuffers::NativeTable { - typedef SpaceToDepthOptions TableType; - int32_t block_size; - SpaceToDepthOptionsT() - : block_size(0) { - } -}; - -struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SpaceToDepthOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BLOCK_SIZE = 4 - }; - int32_t block_size() const { - return GetField(VT_BLOCK_SIZE, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BLOCK_SIZE) && - verifier.EndTable(); - } - SpaceToDepthOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SpaceToDepthOptionsT *_o, 
const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SpaceToDepthOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_block_size(int32_t block_size) { - fbb_.AddElement(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0); - } - explicit SpaceToDepthOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SpaceToDepthOptionsBuilder &operator=(const SpaceToDepthOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSpaceToDepthOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t block_size = 0) { - SpaceToDepthOptionsBuilder builder_(_fbb); - builder_.add_block_size(block_size); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DepthToSpaceOptionsT : public flatbuffers::NativeTable { - typedef DepthToSpaceOptions TableType; - int32_t block_size; - DepthToSpaceOptionsT() - : block_size(0) { - } -}; - -struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DepthToSpaceOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BLOCK_SIZE = 4 - }; - int32_t block_size() const { - return GetField(VT_BLOCK_SIZE, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BLOCK_SIZE) && - verifier.EndTable(); - } - DepthToSpaceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DepthToSpaceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct DepthToSpaceOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_block_size(int32_t block_size) { - fbb_.AddElement(DepthToSpaceOptions::VT_BLOCK_SIZE, block_size, 0); - } - explicit DepthToSpaceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DepthToSpaceOptionsBuilder &operator=(const DepthToSpaceOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateDepthToSpaceOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t block_size = 0) { - DepthToSpaceOptionsBuilder builder_(_fbb); - builder_.add_block_size(block_size); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SubOptionsT : public flatbuffers::NativeTable { - typedef SubOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - bool pot_scale_int16; - SubOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - pot_scale_int16(true) { - } -}; - -struct SubOptions 
FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SubOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_POT_SCALE_INT16 = 6 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool pot_scale_int16() const { - return GetField(VT_POT_SCALE_INT16, 1) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_POT_SCALE_INT16) && - verifier.EndTable(); - } - SubOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SubOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(SubOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_pot_scale_int16(bool pot_scale_int16) { - fbb_.AddElement(SubOptions::VT_POT_SCALE_INT16, static_cast(pot_scale_int16), 1); - } - explicit SubOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SubOptionsBuilder &operator=(const SubOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSubOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool pot_scale_int16 = true) { - SubOptionsBuilder builder_(_fbb); - builder_.add_pot_scale_int16(pot_scale_int16); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DivOptionsT : public flatbuffers::NativeTable { - typedef DivOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - DivOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE) { - } -}; - -struct DivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DivOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - verifier.EndTable(); - } - DivOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct DivOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; 
- flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(DivOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - explicit DivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DivOptionsBuilder &operator=(const DivOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateDivOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - DivOptionsBuilder builder_(_fbb); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TopKV2OptionsT : public flatbuffers::NativeTable { - typedef TopKV2Options TableType; - TopKV2OptionsT() { - } -}; - -struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TopKV2OptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - TopKV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TopKV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TopKV2OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit TopKV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - TopKV2OptionsBuilder &operator=(const TopKV2OptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateTopKV2Options( - flatbuffers::FlatBufferBuilder &_fbb) { - TopKV2OptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct EmbeddingLookupSparseOptionsT : public flatbuffers::NativeTable { - typedef EmbeddingLookupSparseOptions TableType; - tflite::CombinerType combiner; - EmbeddingLookupSparseOptionsT() - : combiner(tflite::CombinerType_SUM) { - } -}; - -struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef EmbeddingLookupSparseOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_COMBINER = 4 - }; - tflite::CombinerType combiner() const { - return static_cast(GetField(VT_COMBINER, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_COMBINER) && - verifier.EndTable(); - } - EmbeddingLookupSparseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder 
&_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct EmbeddingLookupSparseOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_combiner(tflite::CombinerType combiner) { - fbb_.AddElement(EmbeddingLookupSparseOptions::VT_COMBINER, static_cast(combiner), 0); - } - explicit EmbeddingLookupSparseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - EmbeddingLookupSparseOptionsBuilder &operator=(const EmbeddingLookupSparseOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateEmbeddingLookupSparseOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::CombinerType combiner = tflite::CombinerType_SUM) { - EmbeddingLookupSparseOptionsBuilder builder_(_fbb); - builder_.add_combiner(combiner); - return builder_.Finish(); -} - -flatbuffers::Offset CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GatherOptionsT : public flatbuffers::NativeTable { - typedef GatherOptions TableType; - int32_t axis; - GatherOptionsT() - : axis(0) { - } -}; - -struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GatherOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_AXIS = 4 - }; - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_AXIS) && - verifier.EndTable(); - } - GatherOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GatherOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GatherOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_axis(int32_t axis) { - fbb_.AddElement(GatherOptions::VT_AXIS, axis, 0); - } - explicit GatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - GatherOptionsBuilder &operator=(const GatherOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGatherOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 0) { - GatherOptionsBuilder builder_(_fbb); - builder_.add_axis(axis); - return builder_.Finish(); -} - -flatbuffers::Offset CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TransposeOptionsT : public flatbuffers::NativeTable { - typedef TransposeOptions TableType; - TransposeOptionsT() { - } -}; - -struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TransposeOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - TransposeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void 
UnPackTo(TransposeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TransposeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit TransposeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - TransposeOptionsBuilder &operator=(const TransposeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateTransposeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - TransposeOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ExpOptionsT : public flatbuffers::NativeTable { - typedef ExpOptions TableType; - ExpOptionsT() { - } -}; - -struct ExpOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ExpOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - ExpOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ExpOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ExpOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit ExpOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ExpOptionsBuilder &operator=(const ExpOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateExpOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - ExpOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct CosOptionsT : public flatbuffers::NativeTable { - typedef CosOptions TableType; - CosOptionsT() { - } -}; - -struct CosOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CosOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - CosOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct CosOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit CosOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - CosOptionsBuilder &operator=(const CosOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = 
flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateCosOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - CosOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ReducerOptionsT : public flatbuffers::NativeTable { - typedef ReducerOptions TableType; - bool keep_dims; - ReducerOptionsT() - : keep_dims(false) { - } -}; - -struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ReducerOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_KEEP_DIMS = 4 - }; - bool keep_dims() const { - return GetField(VT_KEEP_DIMS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_KEEP_DIMS) && - verifier.EndTable(); - } - ReducerOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReducerOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ReducerOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_keep_dims(bool keep_dims) { - fbb_.AddElement(ReducerOptions::VT_KEEP_DIMS, static_cast(keep_dims), 0); - } - explicit ReducerOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ReducerOptionsBuilder &operator=(const ReducerOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateReducerOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool keep_dims = false) { - ReducerOptionsBuilder builder_(_fbb); - builder_.add_keep_dims(keep_dims); - return builder_.Finish(); -} - -flatbuffers::Offset CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SqueezeOptionsT : public flatbuffers::NativeTable { - typedef SqueezeOptions TableType; - std::vector squeeze_dims; - SqueezeOptionsT() { - } -}; - -struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SqueezeOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SQUEEZE_DIMS = 4 - }; - const flatbuffers::Vector *squeeze_dims() const { - return GetPointer *>(VT_SQUEEZE_DIMS); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_SQUEEZE_DIMS) && - verifier.VerifyVector(squeeze_dims()) && - verifier.EndTable(); - } - SqueezeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SqueezeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SqueezeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_squeeze_dims(flatbuffers::Offset> squeeze_dims) { - fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, 
squeeze_dims); - } - explicit SqueezeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SqueezeOptionsBuilder &operator=(const SqueezeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSqueezeOptions( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> squeeze_dims = 0) { - SqueezeOptionsBuilder builder_(_fbb); - builder_.add_squeeze_dims(squeeze_dims); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateSqueezeOptionsDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *squeeze_dims = nullptr) { - auto squeeze_dims__ = squeeze_dims ? _fbb.CreateVector(*squeeze_dims) : 0; - return tflite::CreateSqueezeOptions( - _fbb, - squeeze_dims__); -} - -flatbuffers::Offset CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SplitOptionsT : public flatbuffers::NativeTable { - typedef SplitOptions TableType; - int32_t num_splits; - SplitOptionsT() - : num_splits(0) { - } -}; - -struct SplitOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SplitOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NUM_SPLITS = 4 - }; - int32_t num_splits() const { - return GetField(VT_NUM_SPLITS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NUM_SPLITS) && - verifier.EndTable(); - } - SplitOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SplitOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SplitOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_num_splits(int32_t num_splits) { - fbb_.AddElement(SplitOptions::VT_NUM_SPLITS, num_splits, 0); - } - explicit SplitOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SplitOptionsBuilder &operator=(const SplitOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSplitOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t num_splits = 0) { - SplitOptionsBuilder builder_(_fbb); - builder_.add_num_splits(num_splits); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SplitVOptionsT : public flatbuffers::NativeTable { - typedef SplitVOptions TableType; - int32_t num_splits; - SplitVOptionsT() - : num_splits(0) { - } -}; - -struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SplitVOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NUM_SPLITS = 4 - }; - int32_t num_splits() const { - return GetField(VT_NUM_SPLITS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NUM_SPLITS) && - verifier.EndTable(); - 
} - SplitVOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SplitVOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SplitVOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_num_splits(int32_t num_splits) { - fbb_.AddElement(SplitVOptions::VT_NUM_SPLITS, num_splits, 0); - } - explicit SplitVOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SplitVOptionsBuilder &operator=(const SplitVOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSplitVOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t num_splits = 0) { - SplitVOptionsBuilder builder_(_fbb); - builder_.add_num_splits(num_splits); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct StridedSliceOptionsT : public flatbuffers::NativeTable { - typedef StridedSliceOptions TableType; - int32_t begin_mask; - int32_t end_mask; - int32_t ellipsis_mask; - int32_t new_axis_mask; - int32_t shrink_axis_mask; - StridedSliceOptionsT() - : begin_mask(0), - end_mask(0), - ellipsis_mask(0), - new_axis_mask(0), - shrink_axis_mask(0) { - } -}; - -struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef StridedSliceOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BEGIN_MASK = 4, - VT_END_MASK = 6, - VT_ELLIPSIS_MASK = 8, - VT_NEW_AXIS_MASK = 10, - VT_SHRINK_AXIS_MASK = 12 - }; - int32_t begin_mask() const { - return GetField(VT_BEGIN_MASK, 0); - } - int32_t end_mask() const { - return GetField(VT_END_MASK, 0); - } - int32_t ellipsis_mask() const { - return GetField(VT_ELLIPSIS_MASK, 0); - } - int32_t new_axis_mask() const { - return GetField(VT_NEW_AXIS_MASK, 0); - } - int32_t shrink_axis_mask() const { - return GetField(VT_SHRINK_AXIS_MASK, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BEGIN_MASK) && - VerifyField(verifier, VT_END_MASK) && - VerifyField(verifier, VT_ELLIPSIS_MASK) && - VerifyField(verifier, VT_NEW_AXIS_MASK) && - VerifyField(verifier, VT_SHRINK_AXIS_MASK) && - verifier.EndTable(); - } - StridedSliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct StridedSliceOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_begin_mask(int32_t begin_mask) { - fbb_.AddElement(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0); - } - void add_end_mask(int32_t end_mask) { - fbb_.AddElement(StridedSliceOptions::VT_END_MASK, end_mask, 0); - } - void add_ellipsis_mask(int32_t ellipsis_mask) { - fbb_.AddElement(StridedSliceOptions::VT_ELLIPSIS_MASK, ellipsis_mask, 0); - } - void 
add_new_axis_mask(int32_t new_axis_mask) { - fbb_.AddElement(StridedSliceOptions::VT_NEW_AXIS_MASK, new_axis_mask, 0); - } - void add_shrink_axis_mask(int32_t shrink_axis_mask) { - fbb_.AddElement(StridedSliceOptions::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0); - } - explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - StridedSliceOptionsBuilder &operator=(const StridedSliceOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateStridedSliceOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t begin_mask = 0, - int32_t end_mask = 0, - int32_t ellipsis_mask = 0, - int32_t new_axis_mask = 0, - int32_t shrink_axis_mask = 0) { - StridedSliceOptionsBuilder builder_(_fbb); - builder_.add_shrink_axis_mask(shrink_axis_mask); - builder_.add_new_axis_mask(new_axis_mask); - builder_.add_ellipsis_mask(ellipsis_mask); - builder_.add_end_mask(end_mask); - builder_.add_begin_mask(begin_mask); - return builder_.Finish(); -} - -flatbuffers::Offset CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LogSoftmaxOptionsT : public flatbuffers::NativeTable { - typedef LogSoftmaxOptions TableType; - LogSoftmaxOptionsT() { - } -}; - -struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LogSoftmaxOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - LogSoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LogSoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LogSoftmaxOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LogSoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LogSoftmaxOptionsBuilder &operator=(const LogSoftmaxOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLogSoftmaxOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LogSoftmaxOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct CastOptionsT : public flatbuffers::NativeTable { - typedef CastOptions TableType; - tflite::TensorType in_data_type; - tflite::TensorType out_data_type; - CastOptionsT() - : in_data_type(tflite::TensorType_FLOAT32), - out_data_type(tflite::TensorType_FLOAT32) { - } -}; - -struct CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CastOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_IN_DATA_TYPE = 4, - VT_OUT_DATA_TYPE = 6 - }; - tflite::TensorType in_data_type() const { - return static_cast(GetField(VT_IN_DATA_TYPE, 0)); - } - tflite::TensorType out_data_type() const { - return 
static_cast(GetField(VT_OUT_DATA_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_IN_DATA_TYPE) && - VerifyField(verifier, VT_OUT_DATA_TYPE) && - verifier.EndTable(); - } - CastOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct CastOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_in_data_type(tflite::TensorType in_data_type) { - fbb_.AddElement(CastOptions::VT_IN_DATA_TYPE, static_cast(in_data_type), 0); - } - void add_out_data_type(tflite::TensorType out_data_type) { - fbb_.AddElement(CastOptions::VT_OUT_DATA_TYPE, static_cast(out_data_type), 0); - } - explicit CastOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - CastOptionsBuilder &operator=(const CastOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateCastOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType in_data_type = tflite::TensorType_FLOAT32, - tflite::TensorType out_data_type = tflite::TensorType_FLOAT32) { - CastOptionsBuilder builder_(_fbb); - builder_.add_out_data_type(out_data_type); - builder_.add_in_data_type(in_data_type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DequantizeOptionsT : public flatbuffers::NativeTable { - typedef DequantizeOptions TableType; - DequantizeOptionsT() { - } -}; - -struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DequantizeOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - DequantizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DequantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct DequantizeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit DequantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DequantizeOptionsBuilder &operator=(const DequantizeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateDequantizeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - DequantizeOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct MaximumMinimumOptionsT : public flatbuffers::NativeTable { - typedef MaximumMinimumOptions TableType; - MaximumMinimumOptionsT() { - } -}; 
- -struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MaximumMinimumOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - MaximumMinimumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MaximumMinimumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct MaximumMinimumOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit MaximumMinimumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MaximumMinimumOptionsBuilder &operator=(const MaximumMinimumOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateMaximumMinimumOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - MaximumMinimumOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TileOptionsT : public flatbuffers::NativeTable { - typedef TileOptions TableType; - TileOptionsT() { - } -}; - -struct TileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TileOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - TileOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TileOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit TileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - TileOptionsBuilder &operator=(const TileOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateTileOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - TileOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ArgMaxOptionsT : public flatbuffers::NativeTable { - typedef ArgMaxOptions TableType; - tflite::TensorType output_type; - ArgMaxOptionsT() - : output_type(tflite::TensorType_FLOAT32) { - } -}; - -struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ArgMaxOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUTPUT_TYPE = 4 - }; - tflite::TensorType output_type() const { - return static_cast(GetField(VT_OUTPUT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, 
VT_OUTPUT_TYPE) && - verifier.EndTable(); - } - ArgMaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ArgMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ArgMaxOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_output_type(tflite::TensorType output_type) { - fbb_.AddElement(ArgMaxOptions::VT_OUTPUT_TYPE, static_cast(output_type), 0); - } - explicit ArgMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ArgMaxOptionsBuilder &operator=(const ArgMaxOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateArgMaxOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType output_type = tflite::TensorType_FLOAT32) { - ArgMaxOptionsBuilder builder_(_fbb); - builder_.add_output_type(output_type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ArgMinOptionsT : public flatbuffers::NativeTable { - typedef ArgMinOptions TableType; - tflite::TensorType output_type; - ArgMinOptionsT() - : output_type(tflite::TensorType_FLOAT32) { - } -}; - -struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ArgMinOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUTPUT_TYPE = 4 - }; - tflite::TensorType output_type() const { - return static_cast(GetField(VT_OUTPUT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OUTPUT_TYPE) && - verifier.EndTable(); - } - ArgMinOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ArgMinOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_output_type(tflite::TensorType output_type) { - fbb_.AddElement(ArgMinOptions::VT_OUTPUT_TYPE, static_cast(output_type), 0); - } - explicit ArgMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ArgMinOptionsBuilder &operator=(const ArgMinOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateArgMinOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType output_type = tflite::TensorType_FLOAT32) { - ArgMinOptionsBuilder builder_(_fbb); - builder_.add_output_type(output_type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GreaterOptionsT : public flatbuffers::NativeTable { - typedef GreaterOptions TableType; - 
GreaterOptionsT() { - } -}; - -struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GreaterOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - GreaterOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GreaterOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GreaterOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit GreaterOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - GreaterOptionsBuilder &operator=(const GreaterOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGreaterOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - GreaterOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GreaterEqualOptionsT : public flatbuffers::NativeTable { - typedef GreaterEqualOptions TableType; - GreaterEqualOptionsT() { - } -}; - -struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GreaterEqualOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - GreaterEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GreaterEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GreaterEqualOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit GreaterEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - GreaterEqualOptionsBuilder &operator=(const GreaterEqualOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGreaterEqualOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - GreaterEqualOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LessOptionsT : public flatbuffers::NativeTable { - typedef LessOptions TableType; - LessOptionsT() { - } -}; - -struct LessOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LessOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - LessOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LessOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static 
flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LessOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LessOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LessOptionsBuilder &operator=(const LessOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLessOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LessOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LessEqualOptionsT : public flatbuffers::NativeTable { - typedef LessEqualOptions TableType; - LessEqualOptionsT() { - } -}; - -struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LessEqualOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - LessEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LessEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LessEqualOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LessEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LessEqualOptionsBuilder &operator=(const LessEqualOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLessEqualOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LessEqualOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct NegOptionsT : public flatbuffers::NativeTable { - typedef NegOptions TableType; - NegOptionsT() { - } -}; - -struct NegOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef NegOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - NegOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct NegOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit NegOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - NegOptionsBuilder &operator=(const NegOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline 
flatbuffers::Offset<NegOptions> CreateNegOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  NegOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<NegOptions> CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
[Removed in this hunk, following the same generated pattern as NegOptions: for each option table X the header declares a native struct XT (a flatbuffers::NativeTable), the table struct X with Verify()/UnPack()/UnPackTo()/Pack(), an XBuilder with add_* setters and Finish(), an inline CreateX(flatbuffers::FlatBufferBuilder &, fields...) helper, and a CreateX(flatbuffers::FlatBufferBuilder &, const XT *, const flatbuffers::rehasher_function_t *) declaration. The tables and their fields (type, default) are:
- no fields: SelectOptions, SliceOptions, ExpandDimsOptions, EqualOptions, NotEqualOptions, RankOptions, PowOptions, LogicalOrOptions, AbsOptions, HardSwishOptions, LogicalAndOptions, LogicalNotOptions, FloorDivOptions, SquareOptions, ZerosLikeOptions, FillOptions, FloorModOptions, RangeOptions, SquaredDifferenceOptions, ReverseV2Options, AddNOptions, GatherNdOptions, WhereOptions, MatrixDiagOptions, QuantizeOptions, MatrixSetDiagOptions, NonMaxSuppressionV4Options, NonMaxSuppressionV5Options, ScatterNdOptions (resumed below)
- TransposeConvOptions: padding (tflite::Padding, Padding_SAME); stride_w, stride_h (int32_t, 0)
- SparseToDenseOptions: validate_indices (bool, false)
- ShapeOptions: out_type (tflite::TensorType, TensorType_FLOAT32)
- FakeQuantOptions: min, max (float, 0.0f); num_bits (int32_t, 0); narrow_range (bool, false)
- PackOptions: values_count, axis (int32_t, 0)
- OneHotOptions: axis (int32_t, 0)
- UnpackOptions: num, axis (int32_t, 0)
- LeakyReluOptions: alpha (float, 0.0f)
- MirrorPadOptions: mode (tflite::MirrorPadMode, MirrorPadMode_REFLECT)
- UniqueOptions: idx_out_type (tflite::TensorType, TensorType_INT32)
- ReverseSequenceOptions: seq_dim, batch_dim (int32_t, 0)
- IfOptions: then_subgraph_index, else_subgraph_index (int32_t, 0)
- CallOnceOptions: init_subgraph_index (int32_t, 0)
- WhileOptions: cond_subgraph_index, body_subgraph_index (int32_t, 0)]
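For orientation, all of the generated helpers listed above are driven the same way through the FlatBuffers builder and object APIs. The fragment below is an illustrative sketch only, not code from this commit; it assumes the generated header is available as "schema_generated.h" and that the tables live in the usual tflite namespace.

// Minimal usage sketch for the generated option-table API (illustrative only).
#include <cstdint>
#include <memory>
#include "flatbuffers/flatbuffers.h"
#include "schema_generated.h"  // assumed include path for the generated header above

int main() {
  flatbuffers::FlatBufferBuilder fbb;

  // Builder API: serialize a TransposeConvOptions table.
  auto opts = tflite::CreateTransposeConvOptions(
      fbb, tflite::Padding_SAME, /*stride_w=*/2, /*stride_h=*/2);
  fbb.Finish(opts);

  // Read it back from the finished buffer.
  const auto *root =
      flatbuffers::GetRoot<tflite::TransposeConvOptions>(fbb.GetBufferPointer());
  const int32_t sw = root->stride_w();  // 2

  // Object (native) API: UnPack() copies the table into the mutable
  // TransposeConvOptionsT struct; Pack() serializes such a struct again.
  std::unique_ptr<tflite::TransposeConvOptionsT> native(root->UnPack());
  native->stride_h = 1;
  flatbuffers::FlatBufferBuilder fbb2;
  fbb2.Finish(tflite::TransposeConvOptions::Pack(fbb2, native.get()));

  return sw == 2 ? 0 : 1;
}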
-struct ScatterNdOptionsT : public flatbuffers::NativeTable {
-  typedef ScatterNdOptions TableType;
-  ScatterNdOptionsT() {
-  }
-};
-
-struct ScatterNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef ScatterNdOptionsT NativeTableType;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  ScatterNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void
UnPackTo(ScatterNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ScatterNdOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit ScatterNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ScatterNdOptionsBuilder &operator=(const ScatterNdOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateScatterNdOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - ScatterNdOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SelectV2OptionsT : public flatbuffers::NativeTable { - typedef SelectV2Options TableType; - SelectV2OptionsT() { - } -}; - -struct SelectV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SelectV2OptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SelectV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SelectV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SelectV2OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SelectV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SelectV2OptionsBuilder &operator=(const SelectV2OptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSelectV2Options( - flatbuffers::FlatBufferBuilder &_fbb) { - SelectV2OptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DensifyOptionsT : public flatbuffers::NativeTable { - typedef DensifyOptions TableType; - DensifyOptionsT() { - } -}; - -struct DensifyOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DensifyOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - DensifyOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DensifyOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct DensifyOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit DensifyOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DensifyOptionsBuilder 
&operator=(const DensifyOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateDensifyOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - DensifyOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SegmentSumOptionsT : public flatbuffers::NativeTable { - typedef SegmentSumOptions TableType; - SegmentSumOptionsT() { - } -}; - -struct SegmentSumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SegmentSumOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SegmentSumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SegmentSumOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SegmentSumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SegmentSumOptionsBuilder &operator=(const SegmentSumOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSegmentSumOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SegmentSumOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BatchMatMulOptionsT : public flatbuffers::NativeTable { - typedef BatchMatMulOptions TableType; - bool adj_x; - bool adj_y; - bool asymmetric_quantize_inputs; - BatchMatMulOptionsT() - : adj_x(false), - adj_y(false), - asymmetric_quantize_inputs(false) { - } -}; - -struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BatchMatMulOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ADJ_X = 4, - VT_ADJ_Y = 6, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 - }; - bool adj_x() const { - return GetField(VT_ADJ_X, 0) != 0; - } - bool adj_y() const { - return GetField(VT_ADJ_Y, 0) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ADJ_X) && - VerifyField(verifier, VT_ADJ_Y) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - BatchMatMulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BatchMatMulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BatchMatMulOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - 
flatbuffers::uoffset_t start_; - void add_adj_x(bool adj_x) { - fbb_.AddElement(BatchMatMulOptions::VT_ADJ_X, static_cast(adj_x), 0); - } - void add_adj_y(bool adj_y) { - fbb_.AddElement(BatchMatMulOptions::VT_ADJ_Y, static_cast(adj_y), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(BatchMatMulOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit BatchMatMulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BatchMatMulOptionsBuilder &operator=(const BatchMatMulOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateBatchMatMulOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool adj_x = false, - bool adj_y = false, - bool asymmetric_quantize_inputs = false) { - BatchMatMulOptionsBuilder builder_(_fbb); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_adj_y(adj_y); - builder_.add_adj_x(adj_x); - return builder_.Finish(); -} - -flatbuffers::Offset CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct CumsumOptionsT : public flatbuffers::NativeTable { - typedef CumsumOptions TableType; - bool exclusive; - bool reverse; - CumsumOptionsT() - : exclusive(false), - reverse(false) { - } -}; - -struct CumsumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CumsumOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_EXCLUSIVE = 4, - VT_REVERSE = 6 - }; - bool exclusive() const { - return GetField(VT_EXCLUSIVE, 0) != 0; - } - bool reverse() const { - return GetField(VT_REVERSE, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_EXCLUSIVE) && - VerifyField(verifier, VT_REVERSE) && - verifier.EndTable(); - } - CumsumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CumsumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct CumsumOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_exclusive(bool exclusive) { - fbb_.AddElement(CumsumOptions::VT_EXCLUSIVE, static_cast(exclusive), 0); - } - void add_reverse(bool reverse) { - fbb_.AddElement(CumsumOptions::VT_REVERSE, static_cast(reverse), 0); - } - explicit CumsumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - CumsumOptionsBuilder &operator=(const CumsumOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateCumsumOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool exclusive = false, - bool reverse = false) { - CumsumOptionsBuilder builder_(_fbb); - builder_.add_reverse(reverse); - builder_.add_exclusive(exclusive); - return builder_.Finish(); -} - -flatbuffers::Offset CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const flatbuffers::rehasher_function_t 
*_rehasher = nullptr); - -struct BroadcastToOptionsT : public flatbuffers::NativeTable { - typedef BroadcastToOptions TableType; - BroadcastToOptionsT() { - } -}; - -struct BroadcastToOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BroadcastToOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - BroadcastToOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BroadcastToOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BroadcastToOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit BroadcastToOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BroadcastToOptionsBuilder &operator=(const BroadcastToOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateBroadcastToOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - BroadcastToOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct Rfft2dOptionsT : public flatbuffers::NativeTable { - typedef Rfft2dOptions TableType; - Rfft2dOptionsT() {} -}; - -struct Rfft2dOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Rfft2dOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && verifier.EndTable(); - } - Rfft2dOptionsT *UnPack( - const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo( - Rfft2dOptionsT *_o, - const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack( - flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, - const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct Rfft2dOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit Rfft2dOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Rfft2dOptionsBuilder &operator=(const Rfft2dOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateRfft2dOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - Rfft2dOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRfft2dOptions( - flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, - const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct OperatorCodeT : public flatbuffers::NativeTable { - typedef OperatorCode TableType; - int8_t deprecated_builtin_code; - std::string custom_code; - int32_t version; - tflite::BuiltinOperator builtin_code; - OperatorCodeT() - : deprecated_builtin_code(0), - version(1), - builtin_code(tflite::BuiltinOperator_ADD) { - } -}; - -struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef OperatorCodeT NativeTableType; - enum 
FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DEPRECATED_BUILTIN_CODE = 4, - VT_CUSTOM_CODE = 6, - VT_VERSION = 8, - VT_BUILTIN_CODE = 10 - }; - int8_t deprecated_builtin_code() const { - return GetField(VT_DEPRECATED_BUILTIN_CODE, 0); - } - const flatbuffers::String *custom_code() const { - return GetPointer(VT_CUSTOM_CODE); - } - int32_t version() const { - return GetField(VT_VERSION, 1); - } - tflite::BuiltinOperator builtin_code() const { - return static_cast(GetField(VT_BUILTIN_CODE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_DEPRECATED_BUILTIN_CODE) && - VerifyOffset(verifier, VT_CUSTOM_CODE) && - verifier.VerifyString(custom_code()) && - VerifyField(verifier, VT_VERSION) && - VerifyField(verifier, VT_BUILTIN_CODE) && - verifier.EndTable(); - } - OperatorCodeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct OperatorCodeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_deprecated_builtin_code(int8_t deprecated_builtin_code) { - fbb_.AddElement(OperatorCode::VT_DEPRECATED_BUILTIN_CODE, deprecated_builtin_code, 0); - } - void add_custom_code(flatbuffers::Offset custom_code) { - fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code); - } - void add_version(int32_t version) { - fbb_.AddElement(OperatorCode::VT_VERSION, version, 1); - } - void add_builtin_code(tflite::BuiltinOperator builtin_code) { - fbb_.AddElement(OperatorCode::VT_BUILTIN_CODE, static_cast(builtin_code), 0); - } - explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - OperatorCodeBuilder &operator=(const OperatorCodeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateOperatorCode( - flatbuffers::FlatBufferBuilder &_fbb, - int8_t deprecated_builtin_code = 0, - flatbuffers::Offset custom_code = 0, - int32_t version = 1, - tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) { - OperatorCodeBuilder builder_(_fbb); - builder_.add_builtin_code(builtin_code); - builder_.add_version(version); - builder_.add_custom_code(custom_code); - builder_.add_deprecated_builtin_code(deprecated_builtin_code); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateOperatorCodeDirect( - flatbuffers::FlatBufferBuilder &_fbb, - int8_t deprecated_builtin_code = 0, - const char *custom_code = nullptr, - int32_t version = 1, - tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) { - auto custom_code__ = custom_code ? 
_fbb.CreateString(custom_code) : 0; - return tflite::CreateOperatorCode( - _fbb, - deprecated_builtin_code, - custom_code__, - version, - builtin_code); -} - -flatbuffers::Offset CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct OperatorT : public flatbuffers::NativeTable { - typedef Operator TableType; - uint32_t opcode_index; - std::vector inputs; - std::vector outputs; - tflite::BuiltinOptionsUnion builtin_options; - std::vector custom_options; - tflite::CustomOptionsFormat custom_options_format; - std::vector mutating_variable_inputs; - std::vector intermediates; - OperatorT() - : opcode_index(0), - custom_options_format(tflite::CustomOptionsFormat_FLEXBUFFERS) { - } -}; - -struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef OperatorT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OPCODE_INDEX = 4, - VT_INPUTS = 6, - VT_OUTPUTS = 8, - VT_BUILTIN_OPTIONS_TYPE = 10, - VT_BUILTIN_OPTIONS = 12, - VT_CUSTOM_OPTIONS = 14, - VT_CUSTOM_OPTIONS_FORMAT = 16, - VT_MUTATING_VARIABLE_INPUTS = 18, - VT_INTERMEDIATES = 20 - }; - uint32_t opcode_index() const { - return GetField(VT_OPCODE_INDEX, 0); - } - const flatbuffers::Vector *inputs() const { - return GetPointer *>(VT_INPUTS); - } - const flatbuffers::Vector *outputs() const { - return GetPointer *>(VT_OUTPUTS); - } - tflite::BuiltinOptions builtin_options_type() const { - return static_cast(GetField(VT_BUILTIN_OPTIONS_TYPE, 0)); - } - const void *builtin_options() const { - return GetPointer(VT_BUILTIN_OPTIONS); - } - template const T *builtin_options_as() const; - const tflite::Conv2DOptions *builtin_options_as_Conv2DOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_Conv2DOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DepthwiseConv2DOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ConcatEmbeddingsOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LSHProjectionOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::Pool2DOptions *builtin_options_as_Pool2DOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_Pool2DOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SVDFOptions *builtin_options_as_SVDFOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SVDFOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::RNNOptions *builtin_options_as_RNNOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_RNNOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FullyConnectedOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SoftmaxOptions *builtin_options_as_SoftmaxOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SoftmaxOptions ? 
static_cast(builtin_options()) : nullptr; - } - const tflite::ConcatenationOptions *builtin_options_as_ConcatenationOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ConcatenationOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::AddOptions *builtin_options_as_AddOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_AddOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::L2NormOptions *builtin_options_as_L2NormOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_L2NormOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LocalResponseNormalizationOptions *builtin_options_as_LocalResponseNormalizationOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LocalResponseNormalizationOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LSTMOptions *builtin_options_as_LSTMOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LSTMOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ResizeBilinearOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::CallOptions *builtin_options_as_CallOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CallOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ReshapeOptions *builtin_options_as_ReshapeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ReshapeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SkipGramOptions *builtin_options_as_SkipGramOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SkipGramOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SpaceToDepthOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_EmbeddingLookupSparseOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::MulOptions *builtin_options_as_MulOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MulOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::PadOptions *builtin_options_as_PadOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_PadOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::GatherOptions *builtin_options_as_GatherOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_GatherOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BatchToSpaceNDOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SpaceToBatchNDOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::TransposeOptions *builtin_options_as_TransposeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_TransposeOptions ? 
static_cast(builtin_options()) : nullptr; - } - const tflite::ReducerOptions *builtin_options_as_ReducerOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ReducerOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SubOptions *builtin_options_as_SubOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SubOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::DivOptions *builtin_options_as_DivOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DivOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SqueezeOptions *builtin_options_as_SqueezeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SqueezeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SequenceRNNOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::StridedSliceOptions *builtin_options_as_StridedSliceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_StridedSliceOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ExpOptions *builtin_options_as_ExpOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ExpOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::TopKV2Options *builtin_options_as_TopKV2Options() const { - return builtin_options_type() == tflite::BuiltinOptions_TopKV2Options ? static_cast(builtin_options()) : nullptr; - } - const tflite::SplitOptions *builtin_options_as_SplitOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SplitOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LogSoftmaxOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::CastOptions *builtin_options_as_CastOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CastOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::DequantizeOptions *builtin_options_as_DequantizeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DequantizeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MaximumMinimumOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ArgMaxOptions *builtin_options_as_ArgMaxOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ArgMaxOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LessOptions *builtin_options_as_LessOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LessOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::NegOptions *builtin_options_as_NegOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_NegOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::PadV2Options *builtin_options_as_PadV2Options() const { - return builtin_options_type() == tflite::BuiltinOptions_PadV2Options ? static_cast(builtin_options()) : nullptr; - } - const tflite::GreaterOptions *builtin_options_as_GreaterOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_GreaterOptions ? 
static_cast(builtin_options()) : nullptr; - } - const tflite::GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_GreaterEqualOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LessEqualOptions *builtin_options_as_LessEqualOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LessEqualOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SelectOptions *builtin_options_as_SelectOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SelectOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SliceOptions *builtin_options_as_SliceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SliceOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::TransposeConvOptions *builtin_options_as_TransposeConvOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_TransposeConvOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SparseToDenseOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::TileOptions *builtin_options_as_TileOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_TileOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ExpandDimsOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::EqualOptions *builtin_options_as_EqualOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_EqualOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::NotEqualOptions *builtin_options_as_NotEqualOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_NotEqualOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ShapeOptions *builtin_options_as_ShapeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ShapeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::PowOptions *builtin_options_as_PowOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_PowOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ArgMinOptions *builtin_options_as_ArgMinOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ArgMinOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::FakeQuantOptions *builtin_options_as_FakeQuantOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FakeQuantOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::PackOptions *builtin_options_as_PackOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_PackOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LogicalOrOptions *builtin_options_as_LogicalOrOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LogicalOrOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::OneHotOptions *builtin_options_as_OneHotOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_OneHotOptions ? 
static_cast(builtin_options()) : nullptr; - } - const tflite::LogicalAndOptions *builtin_options_as_LogicalAndOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LogicalAndOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LogicalNotOptions *builtin_options_as_LogicalNotOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LogicalNotOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::UnpackOptions *builtin_options_as_UnpackOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_UnpackOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::FloorDivOptions *builtin_options_as_FloorDivOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FloorDivOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SquareOptions *builtin_options_as_SquareOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SquareOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ZerosLikeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::FillOptions *builtin_options_as_FillOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FillOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::BidirectionalSequenceLSTMOptions *builtin_options_as_BidirectionalSequenceLSTMOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BidirectionalSequenceLSTMOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::BidirectionalSequenceRNNOptions *builtin_options_as_BidirectionalSequenceRNNOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BidirectionalSequenceRNNOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::UnidirectionalSequenceLSTMOptions *builtin_options_as_UnidirectionalSequenceLSTMOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_UnidirectionalSequenceLSTMOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::FloorModOptions *builtin_options_as_FloorModOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FloorModOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::RangeOptions *builtin_options_as_RangeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_RangeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ResizeNearestNeighborOptions *builtin_options_as_ResizeNearestNeighborOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ResizeNearestNeighborOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LeakyReluOptions *builtin_options_as_LeakyReluOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LeakyReluOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SquaredDifferenceOptions *builtin_options_as_SquaredDifferenceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SquaredDifferenceOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::MirrorPadOptions *builtin_options_as_MirrorPadOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MirrorPadOptions ? 
static_cast(builtin_options()) : nullptr; - } - const tflite::AbsOptions *builtin_options_as_AbsOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_AbsOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SplitVOptions *builtin_options_as_SplitVOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SplitVOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::UniqueOptions *builtin_options_as_UniqueOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_UniqueOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ReverseV2Options *builtin_options_as_ReverseV2Options() const { - return builtin_options_type() == tflite::BuiltinOptions_ReverseV2Options ? static_cast(builtin_options()) : nullptr; - } - const tflite::AddNOptions *builtin_options_as_AddNOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_AddNOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::GatherNdOptions *builtin_options_as_GatherNdOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_GatherNdOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::CosOptions *builtin_options_as_CosOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CosOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::WhereOptions *builtin_options_as_WhereOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_WhereOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::RankOptions *builtin_options_as_RankOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_RankOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ReverseSequenceOptions *builtin_options_as_ReverseSequenceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ReverseSequenceOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::MatrixDiagOptions *builtin_options_as_MatrixDiagOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MatrixDiagOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::QuantizeOptions *builtin_options_as_QuantizeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_QuantizeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::MatrixSetDiagOptions *builtin_options_as_MatrixSetDiagOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MatrixSetDiagOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::HardSwishOptions *builtin_options_as_HardSwishOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_HardSwishOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::IfOptions *builtin_options_as_IfOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_IfOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::WhileOptions *builtin_options_as_WhileOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_WhileOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::DepthToSpaceOptions *builtin_options_as_DepthToSpaceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DepthToSpaceOptions ? 
static_cast(builtin_options()) : nullptr; - } - const tflite::NonMaxSuppressionV4Options *builtin_options_as_NonMaxSuppressionV4Options() const { - return builtin_options_type() == tflite::BuiltinOptions_NonMaxSuppressionV4Options ? static_cast(builtin_options()) : nullptr; - } - const tflite::NonMaxSuppressionV5Options *builtin_options_as_NonMaxSuppressionV5Options() const { - return builtin_options_type() == tflite::BuiltinOptions_NonMaxSuppressionV5Options ? static_cast(builtin_options()) : nullptr; - } - const tflite::ScatterNdOptions *builtin_options_as_ScatterNdOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ScatterNdOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SelectV2Options *builtin_options_as_SelectV2Options() const { - return builtin_options_type() == tflite::BuiltinOptions_SelectV2Options ? static_cast(builtin_options()) : nullptr; - } - const tflite::DensifyOptions *builtin_options_as_DensifyOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DensifyOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SegmentSumOptions *builtin_options_as_SegmentSumOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SegmentSumOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::BatchMatMulOptions *builtin_options_as_BatchMatMulOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BatchMatMulOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::CumsumOptions *builtin_options_as_CumsumOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CumsumOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::CallOnceOptions *builtin_options_as_CallOnceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CallOnceOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::BroadcastToOptions *builtin_options_as_BroadcastToOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BroadcastToOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::Rfft2dOptions *builtin_options_as_Rfft2dOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_Rfft2dOptions - ? 
static_cast(builtin_options()) - : nullptr; - } - const flatbuffers::Vector *custom_options() const { - return GetPointer *>(VT_CUSTOM_OPTIONS); - } - tflite::CustomOptionsFormat custom_options_format() const { - return static_cast(GetField(VT_CUSTOM_OPTIONS_FORMAT, 0)); - } - const flatbuffers::Vector *mutating_variable_inputs() const { - return GetPointer *>(VT_MUTATING_VARIABLE_INPUTS); - } - const flatbuffers::Vector *intermediates() const { - return GetPointer *>(VT_INTERMEDIATES); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OPCODE_INDEX) && - VerifyOffset(verifier, VT_INPUTS) && - verifier.VerifyVector(inputs()) && - VerifyOffset(verifier, VT_OUTPUTS) && - verifier.VerifyVector(outputs()) && - VerifyField(verifier, VT_BUILTIN_OPTIONS_TYPE) && - VerifyOffset(verifier, VT_BUILTIN_OPTIONS) && - VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) && - VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && - verifier.VerifyVector(custom_options()) && - VerifyField(verifier, VT_CUSTOM_OPTIONS_FORMAT) && - VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) && - verifier.VerifyVector(mutating_variable_inputs()) && - VerifyOffset(verifier, VT_INTERMEDIATES) && - verifier.VerifyVector(intermediates()) && - verifier.EndTable(); - } - OperatorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -template<> inline const tflite::Conv2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_Conv2DOptions(); -} - -template<> inline const tflite::DepthwiseConv2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_DepthwiseConv2DOptions(); -} - -template<> inline const tflite::ConcatEmbeddingsOptions *Operator::builtin_options_as() const { - return builtin_options_as_ConcatEmbeddingsOptions(); -} - -template<> inline const tflite::LSHProjectionOptions *Operator::builtin_options_as() const { - return builtin_options_as_LSHProjectionOptions(); -} - -template<> inline const tflite::Pool2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_Pool2DOptions(); -} - -template<> inline const tflite::SVDFOptions *Operator::builtin_options_as() const { - return builtin_options_as_SVDFOptions(); -} - -template<> inline const tflite::RNNOptions *Operator::builtin_options_as() const { - return builtin_options_as_RNNOptions(); -} - -template<> inline const tflite::FullyConnectedOptions *Operator::builtin_options_as() const { - return builtin_options_as_FullyConnectedOptions(); -} - -template<> inline const tflite::SoftmaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_SoftmaxOptions(); -} - -template<> inline const tflite::ConcatenationOptions *Operator::builtin_options_as() const { - return builtin_options_as_ConcatenationOptions(); -} - -template<> inline const tflite::AddOptions *Operator::builtin_options_as() const { - return builtin_options_as_AddOptions(); -} - -template<> inline const tflite::L2NormOptions *Operator::builtin_options_as() const { - return builtin_options_as_L2NormOptions(); -} - -template<> inline const tflite::LocalResponseNormalizationOptions *Operator::builtin_options_as() const { - return 
builtin_options_as_LocalResponseNormalizationOptions(); -} - -template<> inline const tflite::LSTMOptions *Operator::builtin_options_as() const { - return builtin_options_as_LSTMOptions(); -} - -template<> inline const tflite::ResizeBilinearOptions *Operator::builtin_options_as() const { - return builtin_options_as_ResizeBilinearOptions(); -} - -template<> inline const tflite::CallOptions *Operator::builtin_options_as() const { - return builtin_options_as_CallOptions(); -} - -template<> inline const tflite::ReshapeOptions *Operator::builtin_options_as() const { - return builtin_options_as_ReshapeOptions(); -} - -template<> inline const tflite::SkipGramOptions *Operator::builtin_options_as() const { - return builtin_options_as_SkipGramOptions(); -} - -template<> inline const tflite::SpaceToDepthOptions *Operator::builtin_options_as() const { - return builtin_options_as_SpaceToDepthOptions(); -} - -template<> inline const tflite::EmbeddingLookupSparseOptions *Operator::builtin_options_as() const { - return builtin_options_as_EmbeddingLookupSparseOptions(); -} - -template<> inline const tflite::MulOptions *Operator::builtin_options_as() const { - return builtin_options_as_MulOptions(); -} - -template<> inline const tflite::PadOptions *Operator::builtin_options_as() const { - return builtin_options_as_PadOptions(); -} - -template<> inline const tflite::GatherOptions *Operator::builtin_options_as() const { - return builtin_options_as_GatherOptions(); -} - -template<> inline const tflite::BatchToSpaceNDOptions *Operator::builtin_options_as() const { - return builtin_options_as_BatchToSpaceNDOptions(); -} - -template<> inline const tflite::SpaceToBatchNDOptions *Operator::builtin_options_as() const { - return builtin_options_as_SpaceToBatchNDOptions(); -} - -template<> inline const tflite::TransposeOptions *Operator::builtin_options_as() const { - return builtin_options_as_TransposeOptions(); -} - -template<> inline const tflite::ReducerOptions *Operator::builtin_options_as() const { - return builtin_options_as_ReducerOptions(); -} - -template<> inline const tflite::SubOptions *Operator::builtin_options_as() const { - return builtin_options_as_SubOptions(); -} - -template<> inline const tflite::DivOptions *Operator::builtin_options_as() const { - return builtin_options_as_DivOptions(); -} - -template<> inline const tflite::SqueezeOptions *Operator::builtin_options_as() const { - return builtin_options_as_SqueezeOptions(); -} - -template<> inline const tflite::SequenceRNNOptions *Operator::builtin_options_as() const { - return builtin_options_as_SequenceRNNOptions(); -} - -template<> inline const tflite::StridedSliceOptions *Operator::builtin_options_as() const { - return builtin_options_as_StridedSliceOptions(); -} - -template<> inline const tflite::ExpOptions *Operator::builtin_options_as() const { - return builtin_options_as_ExpOptions(); -} - -template<> inline const tflite::TopKV2Options *Operator::builtin_options_as() const { - return builtin_options_as_TopKV2Options(); -} - -template<> inline const tflite::SplitOptions *Operator::builtin_options_as() const { - return builtin_options_as_SplitOptions(); -} - -template<> inline const tflite::LogSoftmaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogSoftmaxOptions(); -} - -template<> inline const tflite::CastOptions *Operator::builtin_options_as() const { - return builtin_options_as_CastOptions(); -} - -template<> inline const tflite::DequantizeOptions *Operator::builtin_options_as() const { - return 
[Remainder of the FlatBuffers-generated TensorFlow Lite schema header removed by this diff; the hunk was collapsed and its template arguments stripped in extraction, so it is summarized here rather than reproduced verbatim. The deleted code comprises: the Operator::builtin_options_as<T>() template specializations for the remaining builtin-option tables (MaximumMinimumOptions through BroadcastToOptions and Rfft2dOptions); the generated table, Builder, CreateX and CreateXDirect definitions for Operator, SubGraph, Buffer, Metadata, TensorMap, SignatureDef and Model; and the inline object-API UnPack/Pack/CreateX implementations for CustomQuantization, QuantizationParameters, Int32Vector, Uint16Vector, Uint8Vector, DimensionMetadata, SparsityParameters, Tensor, Conv2DOptions, Pool2DOptions, DepthwiseConv2DOptions, ConcatEmbeddingsOptions, LSHProjectionOptions, SVDFOptions, RNNOptions, SequenceRNNOptions, BidirectionalSequenceRNNOptions and FullyConnectedOptions, continuing into SoftmaxOptions below.]
__o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _beta = _o->beta; - return tflite::CreateSoftmaxOptions( - _fbb, - _beta); -} - -inline ConcatenationOptionsT *ConcatenationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ConcatenationOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ConcatenationOptions::UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = axis(); _o->axis = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } -} - -inline flatbuffers::Offset ConcatenationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConcatenationOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatenationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateConcatenationOptions( - _fbb, - _axis, - _fused_activation_function); -} - -inline AddOptionsT *AddOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new AddOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void AddOptions::UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = pot_scale_int16(); _o->pot_scale_int16 = _e; } -} - -inline flatbuffers::Offset AddOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateAddOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _pot_scale_int16 = _o->pot_scale_int16; - return tflite::CreateAddOptions( - _fbb, - _fused_activation_function, - _pot_scale_int16); -} - -inline MulOptionsT *MulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MulOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void MulOptions::UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } -} - -inline flatbuffers::Offset MulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMulOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - 
(void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateMulOptions( - _fbb, - _fused_activation_function); -} - -inline L2NormOptionsT *L2NormOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new L2NormOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void L2NormOptions::UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } -} - -inline flatbuffers::Offset L2NormOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateL2NormOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const L2NormOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateL2NormOptions( - _fbb, - _fused_activation_function); -} - -inline LocalResponseNormalizationOptionsT *LocalResponseNormalizationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LocalResponseNormalizationOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LocalResponseNormalizationOptions::UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = radius(); _o->radius = _e; } - { auto _e = bias(); _o->bias = _e; } - { auto _e = alpha(); _o->alpha = _e; } - { auto _e = beta(); _o->beta = _e; } -} - -inline flatbuffers::Offset LocalResponseNormalizationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLocalResponseNormalizationOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LocalResponseNormalizationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _radius = _o->radius; - auto _bias = _o->bias; - auto _alpha = _o->alpha; - auto _beta = _o->beta; - return tflite::CreateLocalResponseNormalizationOptions( - _fbb, - _radius, - _bias, - _alpha, - _beta); -} - -inline LSTMOptionsT *LSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LSTMOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LSTMOptions::UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = cell_clip(); _o->cell_clip = _e; } - { auto _e = proj_clip(); _o->proj_clip 
= _e; } - { auto _e = kernel_type(); _o->kernel_type = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } -} - -inline flatbuffers::Offset LSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLSTMOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _cell_clip = _o->cell_clip; - auto _proj_clip = _o->proj_clip; - auto _kernel_type = _o->kernel_type; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateLSTMOptions( - _fbb, - _fused_activation_function, - _cell_clip, - _proj_clip, - _kernel_type, - _asymmetric_quantize_inputs); -} - -inline UnidirectionalSequenceLSTMOptionsT *UnidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new UnidirectionalSequenceLSTMOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void UnidirectionalSequenceLSTMOptions::UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = cell_clip(); _o->cell_clip = _e; } - { auto _e = proj_clip(); _o->proj_clip = _e; } - { auto _e = time_major(); _o->time_major = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } -} - -inline flatbuffers::Offset UnidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUnidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnidirectionalSequenceLSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _cell_clip = _o->cell_clip; - auto _proj_clip = _o->proj_clip; - auto _time_major = _o->time_major; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateUnidirectionalSequenceLSTMOptions( - _fbb, - _fused_activation_function, - _cell_clip, - _proj_clip, - _time_major, - _asymmetric_quantize_inputs); -} - -inline BidirectionalSequenceLSTMOptionsT *BidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BidirectionalSequenceLSTMOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void BidirectionalSequenceLSTMOptions::UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = 
cell_clip(); _o->cell_clip = _e; } - { auto _e = proj_clip(); _o->proj_clip = _e; } - { auto _e = merge_outputs(); _o->merge_outputs = _e; } - { auto _e = time_major(); _o->time_major = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } -} - -inline flatbuffers::Offset BidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceLSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _cell_clip = _o->cell_clip; - auto _proj_clip = _o->proj_clip; - auto _merge_outputs = _o->merge_outputs; - auto _time_major = _o->time_major; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateBidirectionalSequenceLSTMOptions( - _fbb, - _fused_activation_function, - _cell_clip, - _proj_clip, - _merge_outputs, - _time_major, - _asymmetric_quantize_inputs); -} - -inline ResizeBilinearOptionsT *ResizeBilinearOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ResizeBilinearOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ResizeBilinearOptions::UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = align_corners(); _o->align_corners = _e; } - { auto _e = half_pixel_centers(); _o->half_pixel_centers = _e; } -} - -inline flatbuffers::Offset ResizeBilinearOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateResizeBilinearOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeBilinearOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _align_corners = _o->align_corners; - auto _half_pixel_centers = _o->half_pixel_centers; - return tflite::CreateResizeBilinearOptions( - _fbb, - _align_corners, - _half_pixel_centers); -} - -inline ResizeNearestNeighborOptionsT *ResizeNearestNeighborOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ResizeNearestNeighborOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ResizeNearestNeighborOptions::UnPackTo(ResizeNearestNeighborOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = align_corners(); _o->align_corners = _e; } - { auto _e = half_pixel_centers(); _o->half_pixel_centers = _e; } -} - -inline flatbuffers::Offset ResizeNearestNeighborOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - 
return CreateResizeNearestNeighborOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeNearestNeighborOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _align_corners = _o->align_corners; - auto _half_pixel_centers = _o->half_pixel_centers; - return tflite::CreateResizeNearestNeighborOptions( - _fbb, - _align_corners, - _half_pixel_centers); -} - -inline CallOptionsT *CallOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CallOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void CallOptions::UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = subgraph(); _o->subgraph = _e; } -} - -inline flatbuffers::Offset CallOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCallOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _subgraph = _o->subgraph; - return tflite::CreateCallOptions( - _fbb, - _subgraph); -} - -inline PadOptionsT *PadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PadOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void PadOptions::UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset PadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePadOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreatePadOptions( - _fbb); -} - -inline PadV2OptionsT *PadV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PadV2OptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void PadV2Options::UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset PadV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePadV2Options(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadV2OptionsT* __o; const 
flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreatePadV2Options( - _fbb); -} - -inline ReshapeOptionsT *ReshapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReshapeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ReshapeOptions::UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = new_shape(); if (_e) { _o->new_shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->new_shape[_i] = _e->Get(_i); } } } -} - -inline flatbuffers::Offset ReshapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReshapeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReshapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _new_shape = _o->new_shape.size() ? _fbb.CreateVector(_o->new_shape) : 0; - return tflite::CreateReshapeOptions( - _fbb, - _new_shape); -} - -inline SpaceToBatchNDOptionsT *SpaceToBatchNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SpaceToBatchNDOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SpaceToBatchNDOptions::UnPackTo(SpaceToBatchNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset SpaceToBatchNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSpaceToBatchNDOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToBatchNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSpaceToBatchNDOptions( - _fbb); -} - -inline BatchToSpaceNDOptionsT *BatchToSpaceNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BatchToSpaceNDOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void BatchToSpaceNDOptions::UnPackTo(BatchToSpaceNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset BatchToSpaceNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBatchToSpaceNDOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchToSpaceNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return 
tflite::CreateBatchToSpaceNDOptions( - _fbb); -} - -inline SkipGramOptionsT *SkipGramOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SkipGramOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SkipGramOptions::UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = ngram_size(); _o->ngram_size = _e; } - { auto _e = max_skip_size(); _o->max_skip_size = _e; } - { auto _e = include_all_ngrams(); _o->include_all_ngrams = _e; } -} - -inline flatbuffers::Offset SkipGramOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSkipGramOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SkipGramOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _ngram_size = _o->ngram_size; - auto _max_skip_size = _o->max_skip_size; - auto _include_all_ngrams = _o->include_all_ngrams; - return tflite::CreateSkipGramOptions( - _fbb, - _ngram_size, - _max_skip_size, - _include_all_ngrams); -} - -inline SpaceToDepthOptionsT *SpaceToDepthOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SpaceToDepthOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SpaceToDepthOptions::UnPackTo(SpaceToDepthOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = block_size(); _o->block_size = _e; } -} - -inline flatbuffers::Offset SpaceToDepthOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSpaceToDepthOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToDepthOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _block_size = _o->block_size; - return tflite::CreateSpaceToDepthOptions( - _fbb, - _block_size); -} - -inline DepthToSpaceOptionsT *DepthToSpaceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DepthToSpaceOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void DepthToSpaceOptions::UnPackTo(DepthToSpaceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = block_size(); _o->block_size = _e; } -} - -inline flatbuffers::Offset DepthToSpaceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDepthToSpaceOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthToSpaceOptionsT* __o; 
const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _block_size = _o->block_size; - return tflite::CreateDepthToSpaceOptions( - _fbb, - _block_size); -} - -inline SubOptionsT *SubOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SubOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SubOptions::UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = pot_scale_int16(); _o->pot_scale_int16 = _e; } -} - -inline flatbuffers::Offset SubOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSubOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _pot_scale_int16 = _o->pot_scale_int16; - return tflite::CreateSubOptions( - _fbb, - _fused_activation_function, - _pot_scale_int16); -} - -inline DivOptionsT *DivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DivOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void DivOptions::UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } -} - -inline flatbuffers::Offset DivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDivOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateDivOptions( - _fbb, - _fused_activation_function); -} - -inline TopKV2OptionsT *TopKV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TopKV2OptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void TopKV2Options::UnPackTo(TopKV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset TopKV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTopKV2Options(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TopKV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return 
tflite::CreateTopKV2Options( - _fbb); -} - -inline EmbeddingLookupSparseOptionsT *EmbeddingLookupSparseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new EmbeddingLookupSparseOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void EmbeddingLookupSparseOptions::UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = combiner(); _o->combiner = _e; } -} - -inline flatbuffers::Offset EmbeddingLookupSparseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateEmbeddingLookupSparseOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EmbeddingLookupSparseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _combiner = _o->combiner; - return tflite::CreateEmbeddingLookupSparseOptions( - _fbb, - _combiner); -} - -inline GatherOptionsT *GatherOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GatherOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void GatherOptions::UnPackTo(GatherOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = axis(); _o->axis = _e; } -} - -inline flatbuffers::Offset GatherOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGatherOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - return tflite::CreateGatherOptions( - _fbb, - _axis); -} - -inline TransposeOptionsT *TransposeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TransposeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void TransposeOptions::UnPackTo(TransposeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset TransposeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTransposeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateTransposeOptions( - _fbb); -} - -inline ExpOptionsT *ExpOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ExpOptionsT(); - UnPackTo(_o, _resolver); - 
return _o; -} - -inline void ExpOptions::UnPackTo(ExpOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset ExpOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateExpOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateExpOptions( - _fbb); -} - -inline CosOptionsT *CosOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CosOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void CosOptions::UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset CosOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCosOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CosOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateCosOptions( - _fbb); -} - -inline ReducerOptionsT *ReducerOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReducerOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ReducerOptions::UnPackTo(ReducerOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = keep_dims(); _o->keep_dims = _e; } -} - -inline flatbuffers::Offset ReducerOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReducerOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReducerOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _keep_dims = _o->keep_dims; - return tflite::CreateReducerOptions( - _fbb, - _keep_dims); -} - -inline SqueezeOptionsT *SqueezeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SqueezeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SqueezeOptions::UnPackTo(SqueezeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = squeeze_dims(); if (_e) { _o->squeeze_dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->squeeze_dims[_i] = _e->Get(_i); } } } -} - -inline flatbuffers::Offset SqueezeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return 
CreateSqueezeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SqueezeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _squeeze_dims = _o->squeeze_dims.size() ? _fbb.CreateVector(_o->squeeze_dims) : 0; - return tflite::CreateSqueezeOptions( - _fbb, - _squeeze_dims); -} - -inline SplitOptionsT *SplitOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SplitOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SplitOptions::UnPackTo(SplitOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = num_splits(); _o->num_splits = _e; } -} - -inline flatbuffers::Offset SplitOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSplitOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SplitOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _num_splits = _o->num_splits; - return tflite::CreateSplitOptions( - _fbb, - _num_splits); -} - -inline SplitVOptionsT *SplitVOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SplitVOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SplitVOptions::UnPackTo(SplitVOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = num_splits(); _o->num_splits = _e; } -} - -inline flatbuffers::Offset SplitVOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSplitVOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SplitVOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _num_splits = _o->num_splits; - return tflite::CreateSplitVOptions( - _fbb, - _num_splits); -} - -inline StridedSliceOptionsT *StridedSliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new StridedSliceOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void StridedSliceOptions::UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = begin_mask(); _o->begin_mask = _e; } - { auto _e = end_mask(); _o->end_mask = _e; } - { auto _e = ellipsis_mask(); _o->ellipsis_mask = _e; } - { auto _e = new_axis_mask(); _o->new_axis_mask = _e; } - { auto _e = shrink_axis_mask(); _o->shrink_axis_mask = _e; } -} - -inline flatbuffers::Offset StridedSliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const 
flatbuffers::rehasher_function_t *_rehasher) { - return CreateStridedSliceOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StridedSliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _begin_mask = _o->begin_mask; - auto _end_mask = _o->end_mask; - auto _ellipsis_mask = _o->ellipsis_mask; - auto _new_axis_mask = _o->new_axis_mask; - auto _shrink_axis_mask = _o->shrink_axis_mask; - return tflite::CreateStridedSliceOptions( - _fbb, - _begin_mask, - _end_mask, - _ellipsis_mask, - _new_axis_mask, - _shrink_axis_mask); -} - -inline LogSoftmaxOptionsT *LogSoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogSoftmaxOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LogSoftmaxOptions::UnPackTo(LogSoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset LogSoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogSoftmaxOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogSoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogSoftmaxOptions( - _fbb); -} - -inline CastOptionsT *CastOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CastOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void CastOptions::UnPackTo(CastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = in_data_type(); _o->in_data_type = _e; } - { auto _e = out_data_type(); _o->out_data_type = _e; } -} - -inline flatbuffers::Offset CastOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCastOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CastOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _in_data_type = _o->in_data_type; - auto _out_data_type = _o->out_data_type; - return tflite::CreateCastOptions( - _fbb, - _in_data_type, - _out_data_type); -} - -inline DequantizeOptionsT *DequantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DequantizeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void DequantizeOptions::UnPackTo(DequantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset DequantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const 
DequantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDequantizeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DequantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateDequantizeOptions( - _fbb); -} - -inline MaximumMinimumOptionsT *MaximumMinimumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MaximumMinimumOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void MaximumMinimumOptions::UnPackTo(MaximumMinimumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset MaximumMinimumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMaximumMinimumOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MaximumMinimumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateMaximumMinimumOptions( - _fbb); -} - -inline TileOptionsT *TileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TileOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void TileOptions::UnPackTo(TileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset TileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTileOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateTileOptions( - _fbb); -} - -inline ArgMaxOptionsT *ArgMaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ArgMaxOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ArgMaxOptions::UnPackTo(ArgMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = output_type(); _o->output_type = _e; } -} - -inline flatbuffers::Offset ArgMaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateArgMaxOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMaxOptionsT* __o; const 
flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _output_type = _o->output_type; - return tflite::CreateArgMaxOptions( - _fbb, - _output_type); -} - -inline ArgMinOptionsT *ArgMinOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ArgMinOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ArgMinOptions::UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = output_type(); _o->output_type = _e; } -} - -inline flatbuffers::Offset ArgMinOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateArgMinOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMinOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _output_type = _o->output_type; - return tflite::CreateArgMinOptions( - _fbb, - _output_type); -} - -inline GreaterOptionsT *GreaterOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GreaterOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void GreaterOptions::UnPackTo(GreaterOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset GreaterOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGreaterOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateGreaterOptions( - _fbb); -} - -inline GreaterEqualOptionsT *GreaterEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GreaterEqualOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void GreaterEqualOptions::UnPackTo(GreaterEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset GreaterEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGreaterEqualOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateGreaterEqualOptions( - _fbb); -} - -inline LessOptionsT *LessOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LessOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void 
LessOptions::UnPackTo(LessOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset LessOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLessOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLessOptions( - _fbb); -} - -inline LessEqualOptionsT *LessEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LessEqualOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LessEqualOptions::UnPackTo(LessEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset LessEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLessEqualOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLessEqualOptions( - _fbb); -} - -inline NegOptionsT *NegOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NegOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void NegOptions::UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset NegOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNegOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NegOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateNegOptions( - _fbb); -} - -inline SelectOptionsT *SelectOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SelectOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SelectOptions::UnPackTo(SelectOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset SelectOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSelectOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { 
flatbuffers::FlatBufferBuilder *__fbb; const SelectOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSelectOptions( - _fbb); -} - -inline SliceOptionsT *SliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SliceOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SliceOptions::UnPackTo(SliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset SliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSliceOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSliceOptions( - _fbb); -} - -inline TransposeConvOptionsT *TransposeConvOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TransposeConvOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void TransposeConvOptions::UnPackTo(TransposeConvOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = padding(); _o->padding = _e; } - { auto _e = stride_w(); _o->stride_w = _e; } - { auto _e = stride_h(); _o->stride_h = _e; } -} - -inline flatbuffers::Offset TransposeConvOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTransposeConvOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeConvOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padding = _o->padding; - auto _stride_w = _o->stride_w; - auto _stride_h = _o->stride_h; - return tflite::CreateTransposeConvOptions( - _fbb, - _padding, - _stride_w, - _stride_h); -} - -inline ExpandDimsOptionsT *ExpandDimsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ExpandDimsOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ExpandDimsOptions::UnPackTo(ExpandDimsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset ExpandDimsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateExpandDimsOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpandDimsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return 
tflite::CreateExpandDimsOptions( - _fbb); -} - -inline SparseToDenseOptionsT *SparseToDenseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SparseToDenseOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SparseToDenseOptions::UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = validate_indices(); _o->validate_indices = _e; } -} - -inline flatbuffers::Offset SparseToDenseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSparseToDenseOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SparseToDenseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _validate_indices = _o->validate_indices; - return tflite::CreateSparseToDenseOptions( - _fbb, - _validate_indices); -} - -inline EqualOptionsT *EqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new EqualOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void EqualOptions::UnPackTo(EqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset EqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateEqualOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateEqualOptions( - _fbb); -} - -inline NotEqualOptionsT *NotEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NotEqualOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void NotEqualOptions::UnPackTo(NotEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset NotEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNotEqualOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NotEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateNotEqualOptions( - _fbb); -} - -inline ShapeOptionsT *ShapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ShapeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ShapeOptions::UnPackTo(ShapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - 
(void)_resolver; - { auto _e = out_type(); _o->out_type = _e; } -} - -inline flatbuffers::Offset ShapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateShapeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ShapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _out_type = _o->out_type; - return tflite::CreateShapeOptions( - _fbb, - _out_type); -} - -inline RankOptionsT *RankOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RankOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void RankOptions::UnPackTo(RankOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset RankOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRankOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RankOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateRankOptions( - _fbb); -} - -inline PowOptionsT *PowOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PowOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void PowOptions::UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset PowOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePowOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PowOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreatePowOptions( - _fbb); -} - -inline FakeQuantOptionsT *FakeQuantOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FakeQuantOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void FakeQuantOptions::UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = min(); _o->min = _e; } - { auto _e = max(); _o->max = _e; } - { auto _e = num_bits(); _o->num_bits = _e; } - { auto _e = narrow_range(); _o->narrow_range = _e; } -} - -inline flatbuffers::Offset FakeQuantOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFakeQuantOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT 
*_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FakeQuantOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _min = _o->min; - auto _max = _o->max; - auto _num_bits = _o->num_bits; - auto _narrow_range = _o->narrow_range; - return tflite::CreateFakeQuantOptions( - _fbb, - _min, - _max, - _num_bits, - _narrow_range); -} - -inline PackOptionsT *PackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PackOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void PackOptions::UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = values_count(); _o->values_count = _e; } - { auto _e = axis(); _o->axis = _e; } -} - -inline flatbuffers::Offset PackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePackOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _values_count = _o->values_count; - auto _axis = _o->axis; - return tflite::CreatePackOptions( - _fbb, - _values_count, - _axis); -} - -inline LogicalOrOptionsT *LogicalOrOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogicalOrOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LogicalOrOptions::UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset LogicalOrOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogicalOrOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalOrOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogicalOrOptions( - _fbb); -} - -inline OneHotOptionsT *OneHotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new OneHotOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void OneHotOptions::UnPackTo(OneHotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = axis(); _o->axis = _e; } -} - -inline flatbuffers::Offset OneHotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateOneHotOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OneHotOptionsT* 
__o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - return tflite::CreateOneHotOptions( - _fbb, - _axis); -} - -inline AbsOptionsT *AbsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new AbsOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void AbsOptions::UnPackTo(AbsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset AbsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateAbsOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AbsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateAbsOptions( - _fbb); -} - -inline HardSwishOptionsT *HardSwishOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new HardSwishOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void HardSwishOptions::UnPackTo(HardSwishOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset HardSwishOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateHardSwishOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HardSwishOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateHardSwishOptions( - _fbb); -} - -inline LogicalAndOptionsT *LogicalAndOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogicalAndOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LogicalAndOptions::UnPackTo(LogicalAndOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset LogicalAndOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogicalAndOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalAndOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogicalAndOptions( - _fbb); -} - -inline LogicalNotOptionsT *LogicalNotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogicalNotOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LogicalNotOptions::UnPackTo(LogicalNotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - 
(void)_resolver; -} - -inline flatbuffers::Offset LogicalNotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogicalNotOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalNotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogicalNotOptions( - _fbb); -} - -inline UnpackOptionsT *UnpackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new UnpackOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void UnpackOptions::UnPackTo(UnpackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = num(); _o->num = _e; } - { auto _e = axis(); _o->axis = _e; } -} - -inline flatbuffers::Offset UnpackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUnpackOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnpackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _num = _o->num; - auto _axis = _o->axis; - return tflite::CreateUnpackOptions( - _fbb, - _num, - _axis); -} - -inline FloorDivOptionsT *FloorDivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FloorDivOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void FloorDivOptions::UnPackTo(FloorDivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset FloorDivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFloorDivOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FloorDivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateFloorDivOptions( - _fbb); -} - -inline SquareOptionsT *SquareOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SquareOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SquareOptions::UnPackTo(SquareOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset SquareOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSquareOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquareOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSquareOptions( - _fbb); -} - -inline ZerosLikeOptionsT *ZerosLikeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ZerosLikeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ZerosLikeOptions::UnPackTo(ZerosLikeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset ZerosLikeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateZerosLikeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ZerosLikeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateZerosLikeOptions( - _fbb); -} - -inline FillOptionsT *FillOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FillOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void FillOptions::UnPackTo(FillOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset FillOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFillOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FillOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateFillOptions( - _fbb); -} - -inline FloorModOptionsT *FloorModOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FloorModOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void FloorModOptions::UnPackTo(FloorModOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset FloorModOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFloorModOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FloorModOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateFloorModOptions( - _fbb); -} - -inline RangeOptionsT *RangeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RangeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void RangeOptions::UnPackTo(RangeOptionsT 
*_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset RangeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRangeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RangeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateRangeOptions( - _fbb); -} - -inline LeakyReluOptionsT *LeakyReluOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LeakyReluOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LeakyReluOptions::UnPackTo(LeakyReluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = alpha(); _o->alpha = _e; } -} - -inline flatbuffers::Offset LeakyReluOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLeakyReluOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LeakyReluOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _alpha = _o->alpha; - return tflite::CreateLeakyReluOptions( - _fbb, - _alpha); -} - -inline SquaredDifferenceOptionsT *SquaredDifferenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SquaredDifferenceOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SquaredDifferenceOptions::UnPackTo(SquaredDifferenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset SquaredDifferenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSquaredDifferenceOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquaredDifferenceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSquaredDifferenceOptions( - _fbb); -} - -inline MirrorPadOptionsT *MirrorPadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MirrorPadOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void MirrorPadOptions::UnPackTo(MirrorPadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = mode(); _o->mode = _e; } -} - -inline flatbuffers::Offset MirrorPadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return 
CreateMirrorPadOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MirrorPadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _mode = _o->mode; - return tflite::CreateMirrorPadOptions( - _fbb, - _mode); -} - -inline UniqueOptionsT *UniqueOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new UniqueOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void UniqueOptions::UnPackTo(UniqueOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = idx_out_type(); _o->idx_out_type = _e; } -} - -inline flatbuffers::Offset UniqueOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUniqueOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UniqueOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _idx_out_type = _o->idx_out_type; - return tflite::CreateUniqueOptions( - _fbb, - _idx_out_type); -} - -inline ReverseV2OptionsT *ReverseV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReverseV2OptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ReverseV2Options::UnPackTo(ReverseV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset ReverseV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReverseV2Options(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateReverseV2Options( - _fbb); -} - -inline AddNOptionsT *AddNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new AddNOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void AddNOptions::UnPackTo(AddNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset AddNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateAddNOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, 
_rehasher}; (void)_va; - return tflite::CreateAddNOptions( - _fbb); -} - -inline GatherNdOptionsT *GatherNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GatherNdOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void GatherNdOptions::UnPackTo(GatherNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset GatherNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGatherNdOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherNdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateGatherNdOptions( - _fbb); -} - -inline WhereOptionsT *WhereOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new WhereOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void WhereOptions::UnPackTo(WhereOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset WhereOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateWhereOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const WhereOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateWhereOptions( - _fbb); -} - -inline ReverseSequenceOptionsT *ReverseSequenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReverseSequenceOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ReverseSequenceOptions::UnPackTo(ReverseSequenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = seq_dim(); _o->seq_dim = _e; } - { auto _e = batch_dim(); _o->batch_dim = _e; } -} - -inline flatbuffers::Offset ReverseSequenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReverseSequenceOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseSequenceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _seq_dim = _o->seq_dim; - auto _batch_dim = _o->batch_dim; - return tflite::CreateReverseSequenceOptions( - _fbb, - _seq_dim, - _batch_dim); -} - -inline MatrixDiagOptionsT *MatrixDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MatrixDiagOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline 
void MatrixDiagOptions::UnPackTo(MatrixDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset MatrixDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMatrixDiagOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatrixDiagOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateMatrixDiagOptions( - _fbb); -} - -inline QuantizeOptionsT *QuantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizeOptions::UnPackTo(QuantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset QuantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateQuantizeOptions( - _fbb); -} - -inline MatrixSetDiagOptionsT *MatrixSetDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MatrixSetDiagOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void MatrixSetDiagOptions::UnPackTo(MatrixSetDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset MatrixSetDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMatrixSetDiagOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatrixSetDiagOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateMatrixSetDiagOptions( - _fbb); -} - -inline IfOptionsT *IfOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new IfOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void IfOptions::UnPackTo(IfOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = then_subgraph_index(); _o->then_subgraph_index = _e; } - { auto _e = else_subgraph_index(); _o->else_subgraph_index = _e; } -} - -inline flatbuffers::Offset IfOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return 
CreateIfOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const IfOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _then_subgraph_index = _o->then_subgraph_index; - auto _else_subgraph_index = _o->else_subgraph_index; - return tflite::CreateIfOptions( - _fbb, - _then_subgraph_index, - _else_subgraph_index); -} - -inline CallOnceOptionsT *CallOnceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CallOnceOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void CallOnceOptions::UnPackTo(CallOnceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = init_subgraph_index(); _o->init_subgraph_index = _e; } -} - -inline flatbuffers::Offset CallOnceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCallOnceOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOnceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _init_subgraph_index = _o->init_subgraph_index; - return tflite::CreateCallOnceOptions( - _fbb, - _init_subgraph_index); -} - -inline WhileOptionsT *WhileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new WhileOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void WhileOptions::UnPackTo(WhileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = cond_subgraph_index(); _o->cond_subgraph_index = _e; } - { auto _e = body_subgraph_index(); _o->body_subgraph_index = _e; } -} - -inline flatbuffers::Offset WhileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateWhileOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const WhileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _cond_subgraph_index = _o->cond_subgraph_index; - auto _body_subgraph_index = _o->body_subgraph_index; - return tflite::CreateWhileOptions( - _fbb, - _cond_subgraph_index, - _body_subgraph_index); -} - -inline NonMaxSuppressionV4OptionsT *NonMaxSuppressionV4Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NonMaxSuppressionV4OptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void NonMaxSuppressionV4Options::UnPackTo(NonMaxSuppressionV4OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset NonMaxSuppressionV4Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, 
const NonMaxSuppressionV4OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNonMaxSuppressionV4Options(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV4OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateNonMaxSuppressionV4Options( - _fbb); -} - -inline NonMaxSuppressionV5OptionsT *NonMaxSuppressionV5Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NonMaxSuppressionV5OptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void NonMaxSuppressionV5Options::UnPackTo(NonMaxSuppressionV5OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset NonMaxSuppressionV5Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNonMaxSuppressionV5Options(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV5OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateNonMaxSuppressionV5Options( - _fbb); -} - -inline ScatterNdOptionsT *ScatterNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ScatterNdOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ScatterNdOptions::UnPackTo(ScatterNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset ScatterNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateScatterNdOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ScatterNdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateScatterNdOptions( - _fbb); -} - -inline SelectV2OptionsT *SelectV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SelectV2OptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SelectV2Options::UnPackTo(SelectV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset SelectV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSelectV2Options(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const flatbuffers::rehasher_function_t 
*_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SelectV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSelectV2Options( - _fbb); -} - -inline DensifyOptionsT *DensifyOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DensifyOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void DensifyOptions::UnPackTo(DensifyOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset DensifyOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDensifyOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DensifyOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateDensifyOptions( - _fbb); -} - -inline SegmentSumOptionsT *SegmentSumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SegmentSumOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SegmentSumOptions::UnPackTo(SegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset SegmentSumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSegmentSumOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SegmentSumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSegmentSumOptions( - _fbb); -} - -inline BatchMatMulOptionsT *BatchMatMulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BatchMatMulOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void BatchMatMulOptions::UnPackTo(BatchMatMulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = adj_x(); _o->adj_x = _e; } - { auto _e = adj_y(); _o->adj_y = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } -} - -inline flatbuffers::Offset BatchMatMulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBatchMatMulOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchMatMulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _adj_x = _o->adj_x; - auto _adj_y = _o->adj_y; - auto 
_asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateBatchMatMulOptions( - _fbb, - _adj_x, - _adj_y, - _asymmetric_quantize_inputs); -} - -inline CumsumOptionsT *CumsumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CumsumOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void CumsumOptions::UnPackTo(CumsumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = exclusive(); _o->exclusive = _e; } - { auto _e = reverse(); _o->reverse = _e; } -} - -inline flatbuffers::Offset CumsumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCumsumOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CumsumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _exclusive = _o->exclusive; - auto _reverse = _o->reverse; - return tflite::CreateCumsumOptions( - _fbb, - _exclusive, - _reverse); -} - -inline BroadcastToOptionsT *BroadcastToOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BroadcastToOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void BroadcastToOptions::UnPackTo(BroadcastToOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset BroadcastToOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBroadcastToOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BroadcastToOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateBroadcastToOptions( - _fbb); -} - -inline Rfft2dOptionsT *Rfft2dOptions::UnPack( - const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Rfft2dOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Rfft2dOptions::UnPackTo( - Rfft2dOptionsT *_o, - const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset Rfft2dOptions::Pack( - flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, - const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRfft2dOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateRfft2dOptions( - flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, - const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { - flatbuffers::FlatBufferBuilder *__fbb; - const Rfft2dOptionsT *__o; - const flatbuffers::rehasher_function_t *__rehasher; - } _va = {&_fbb, _o, _rehasher}; - (void)_va; - return tflite::CreateRfft2dOptions(_fbb); -} - -inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new OperatorCodeT(); - 
UnPackTo(_o, _resolver); - return _o; -} - -inline void OperatorCode::UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = deprecated_builtin_code(); _o->deprecated_builtin_code = _e; } - { auto _e = custom_code(); if (_e) _o->custom_code = _e->str(); } - { auto _e = version(); _o->version = _e; } - { auto _e = builtin_code(); _o->builtin_code = _e; } -} - -inline flatbuffers::Offset OperatorCode::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateOperatorCode(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorCodeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _deprecated_builtin_code = _o->deprecated_builtin_code; - auto _custom_code = _o->custom_code.empty() ? 0 : _fbb.CreateString(_o->custom_code); - auto _version = _o->version; - auto _builtin_code = _o->builtin_code; - return tflite::CreateOperatorCode( - _fbb, - _deprecated_builtin_code, - _custom_code, - _version, - _builtin_code); -} - -inline OperatorT *Operator::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new OperatorT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Operator::UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = opcode_index(); _o->opcode_index = _e; } - { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } } - { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } } - { auto _e = builtin_options_type(); _o->builtin_options.type = _e; } - { auto _e = builtin_options(); if (_e) _o->builtin_options.value = tflite::BuiltinOptionsUnion::UnPack(_e, builtin_options_type(), _resolver); } - { auto _e = custom_options(); if (_e) { _o->custom_options.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->custom_options.begin()); } } - { auto _e = custom_options_format(); _o->custom_options_format = _e; } - { auto _e = mutating_variable_inputs(); if (_e) { _o->mutating_variable_inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->mutating_variable_inputs[_i] = _e->Get(_i) != 0; } } } - { auto _e = intermediates(); if (_e) { _o->intermediates.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->intermediates[_i] = _e->Get(_i); } } } -} - -inline flatbuffers::Offset Operator::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateOperator(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _opcode_index = _o->opcode_index; - auto _inputs = 
_o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0; - auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0; - auto _builtin_options_type = _o->builtin_options.type; - auto _builtin_options = _o->builtin_options.Pack(_fbb); - auto _custom_options = _o->custom_options.size() ? _fbb.CreateVector(_o->custom_options) : 0; - auto _custom_options_format = _o->custom_options_format; - auto _mutating_variable_inputs = _o->mutating_variable_inputs.size() ? _fbb.CreateVector(_o->mutating_variable_inputs) : 0; - auto _intermediates = _o->intermediates.size() ? _fbb.CreateVector(_o->intermediates) : 0; - return tflite::CreateOperator( - _fbb, - _opcode_index, - _inputs, - _outputs, - _builtin_options_type, - _builtin_options, - _custom_options, - _custom_options_format, - _mutating_variable_inputs, - _intermediates); -} - -inline SubGraphT *SubGraph::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SubGraphT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SubGraph::UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = tensors(); if (_e) { _o->tensors.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->tensors[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } } - { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } } - { auto _e = operators(); if (_e) { _o->operators.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->operators[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = name(); if (_e) _o->name = _e->str(); } -} - -inline flatbuffers::Offset SubGraph::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSubGraph(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubGraphT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _tensors = _o->tensors.size() ? _fbb.CreateVector> (_o->tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateTensor(*__va->__fbb, __va->__o->tensors[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0; - auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0; - auto _operators = _o->operators.size() ? _fbb.CreateVector> (_o->operators.size(), [](size_t i, _VectorArgs *__va) { return CreateOperator(*__va->__fbb, __va->__o->operators[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _name = _o->name.empty() ? 
-      0 : _fbb.CreateString(_o->name);
-  return tflite::CreateSubGraph(
-      _fbb,
-      _tensors,
-      _inputs,
-      _outputs,
-      _operators,
-      _name);
-}
-
-inline BufferT *Buffer::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = new BufferT();
-  UnPackTo(_o, _resolver);
-  return _o;
-}
-
-inline void Buffer::UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = data(); if (_e) { _o->data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->data.begin()); } }
-}
-
-inline flatbuffers::Offset<Buffer> Buffer::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateBuffer(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<Buffer> CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BufferT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  _fbb.ForceVectorAlignment(_o->data.size(), sizeof(uint8_t), 16);
-  auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0;
-  return tflite::CreateBuffer(
-      _fbb,
-      _data);
-}
-
-inline MetadataT *Metadata::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = new MetadataT();
-  UnPackTo(_o, _resolver);
-  return _o;
-}
-
-inline void Metadata::UnPackTo(MetadataT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = name(); if (_e) _o->name = _e->str(); }
-  { auto _e = buffer(); _o->buffer = _e; }
-}
-
-inline flatbuffers::Offset<Metadata> Metadata::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateMetadata(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<Metadata> CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MetadataT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
-  auto _buffer = _o->buffer;
-  return tflite::CreateMetadata(
-      _fbb,
-      _name,
-      _buffer);
-}
-
-inline TensorMapT *TensorMap::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = new TensorMapT();
-  UnPackTo(_o, _resolver);
-  return _o;
-}
-
-inline void TensorMap::UnPackTo(TensorMapT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = name(); if (_e) _o->name = _e->str(); }
-  { auto _e = tensor_index(); _o->tensor_index = _e; }
-}
-
-inline flatbuffers::Offset<TensorMap> TensorMap::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateTensorMap(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<TensorMap> CreateTensorMap(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorMapT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
-  auto _tensor_index = _o->tensor_index;
-  return tflite::CreateTensorMap(
-      _fbb,
-      _name,
-      _tensor_index);
-}
-
-inline SignatureDefT *SignatureDef::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
-  auto _o = new SignatureDefT();
-  UnPackTo(_o, _resolver);
-  return _o;
-}
-
-inline void SignatureDef::UnPackTo(SignatureDefT *_o, const flatbuffers::resolver_function_t *_resolver) const {
-  (void)_o;
-  (void)_resolver;
-  { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = std::unique_ptr<tflite::TensorMapT>(_e->Get(_i)->UnPack(_resolver)); } } }
-  { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = std::unique_ptr<tflite::TensorMapT>(_e->Get(_i)->UnPack(_resolver)); } } }
-  { auto _e = method_name(); if (_e) _o->method_name = _e->str(); }
-  { auto _e = key(); if (_e) _o->key = _e->str(); }
-}
-
-inline flatbuffers::Offset<SignatureDef> SignatureDef::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
-  return CreateSignatureDef(_fbb, _o, _rehasher);
-}
-
-inline flatbuffers::Offset<SignatureDef> CreateSignatureDef(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
-  (void)_rehasher;
-  (void)_o;
-  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SignatureDefT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
-  auto _inputs = _o->inputs.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::TensorMap>> (_o->inputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->inputs[i].get(), __va->__rehasher); }, &_va ) : 0;
-  auto _outputs = _o->outputs.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::TensorMap>> (_o->outputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->outputs[i].get(), __va->__rehasher); }, &_va ) : 0;
-  auto _method_name = _o->method_name.empty() ? 0 : _fbb.CreateString(_o->method_name);
-  auto _key = _o->key.empty() ?
0 : _fbb.CreateString(_o->key); - return tflite::CreateSignatureDef( - _fbb, - _inputs, - _outputs, - _method_name, - _key); -} - -inline ModelT *Model::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ModelT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Model::UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = version(); _o->version = _e; } - { auto _e = operator_codes(); if (_e) { _o->operator_codes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->operator_codes[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->subgraphs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = description(); if (_e) _o->description = _e->str(); } - { auto _e = buffers(); if (_e) { _o->buffers.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->buffers[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = metadata_buffer(); if (_e) { _o->metadata_buffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metadata_buffer[_i] = _e->Get(_i); } } } - { auto _e = metadata(); if (_e) { _o->metadata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metadata[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = signature_defs(); if (_e) { _o->signature_defs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->signature_defs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } -} - -inline flatbuffers::Offset Model::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateModel(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ModelT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _version = _o->version; - auto _operator_codes = _o->operator_codes.size() ? _fbb.CreateVector> (_o->operator_codes.size(), [](size_t i, _VectorArgs *__va) { return CreateOperatorCode(*__va->__fbb, __va->__o->operator_codes[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _subgraphs = _o->subgraphs.size() ? _fbb.CreateVector> (_o->subgraphs.size(), [](size_t i, _VectorArgs *__va) { return CreateSubGraph(*__va->__fbb, __va->__o->subgraphs[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _description = _o->description.empty() ? 0 : _fbb.CreateString(_o->description); - auto _buffers = _o->buffers.size() ? _fbb.CreateVector> (_o->buffers.size(), [](size_t i, _VectorArgs *__va) { return CreateBuffer(*__va->__fbb, __va->__o->buffers[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _metadata_buffer = _o->metadata_buffer.size() ? _fbb.CreateVector(_o->metadata_buffer) : 0; - auto _metadata = _o->metadata.size() ? _fbb.CreateVector> (_o->metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateMetadata(*__va->__fbb, __va->__o->metadata[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _signature_defs = _o->signature_defs.size() ? 
_fbb.CreateVector> (_o->signature_defs.size(), [](size_t i, _VectorArgs *__va) { return CreateSignatureDef(*__va->__fbb, __va->__o->signature_defs[i].get(), __va->__rehasher); }, &_va ) : 0; - return tflite::CreateModel( - _fbb, - _version, - _operator_codes, - _subgraphs, - _description, - _buffers, - _metadata_buffer, - _metadata, - _signature_defs); -} - -inline bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type) { - switch (type) { - case QuantizationDetails_NONE: { - return true; - } - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - default: return true; - } -} - -inline bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { - if (!values || !types) return !values && !types; - if (values->size() != types->size()) return false; - for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { - if (!VerifyQuantizationDetails( - verifier, values->Get(i), types->GetEnum(i))) { - return false; - } - } - return true; -} - -inline void *QuantizationDetailsUnion::UnPack(const void *obj, QuantizationDetails type, const flatbuffers::resolver_function_t *resolver) { - switch (type) { - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - default: return nullptr; - } -} - -inline flatbuffers::Offset QuantizationDetailsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { - switch (type) { - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(value); - return CreateCustomQuantization(_fbb, ptr, _rehasher).Union(); - } - default: return 0; - } -} - -inline QuantizationDetailsUnion::QuantizationDetailsUnion(const QuantizationDetailsUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) { - switch (type) { - case QuantizationDetails_CustomQuantization: { - value = new tflite::CustomQuantizationT(*reinterpret_cast(u.value)); - break; - } - default: - break; - } -} - -inline void QuantizationDetailsUnion::Reset() { - switch (type) { - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - default: break; - } - value = nullptr; - type = QuantizationDetails_NONE; -} - -inline bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type) { - switch (type) { - case SparseIndexVector_NONE: { - return true; - } - case SparseIndexVector_Int32Vector: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case SparseIndexVector_Uint16Vector: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case SparseIndexVector_Uint8Vector: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - default: return true; - } -} - -inline bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { - if (!values || !types) return !values && !types; - if (values->size() != types->size()) return false; - for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { - if (!VerifySparseIndexVector( - verifier, values->Get(i), types->GetEnum(i))) { - return false; - } - } - return true; -} - -inline void *SparseIndexVectorUnion::UnPack(const void *obj, SparseIndexVector type, const flatbuffers::resolver_function_t 
*resolver) { - switch (type) { - case SparseIndexVector_Int32Vector: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case SparseIndexVector_Uint16Vector: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case SparseIndexVector_Uint8Vector: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - default: return nullptr; - } -} - -inline flatbuffers::Offset SparseIndexVectorUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { - switch (type) { - case SparseIndexVector_Int32Vector: { - auto ptr = reinterpret_cast(value); - return CreateInt32Vector(_fbb, ptr, _rehasher).Union(); - } - case SparseIndexVector_Uint16Vector: { - auto ptr = reinterpret_cast(value); - return CreateUint16Vector(_fbb, ptr, _rehasher).Union(); - } - case SparseIndexVector_Uint8Vector: { - auto ptr = reinterpret_cast(value); - return CreateUint8Vector(_fbb, ptr, _rehasher).Union(); - } - default: return 0; - } -} - -inline SparseIndexVectorUnion::SparseIndexVectorUnion(const SparseIndexVectorUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) { - switch (type) { - case SparseIndexVector_Int32Vector: { - value = new tflite::Int32VectorT(*reinterpret_cast(u.value)); - break; - } - case SparseIndexVector_Uint16Vector: { - value = new tflite::Uint16VectorT(*reinterpret_cast(u.value)); - break; - } - case SparseIndexVector_Uint8Vector: { - value = new tflite::Uint8VectorT(*reinterpret_cast(u.value)); - break; - } - default: - break; - } -} - -inline void SparseIndexVectorUnion::Reset() { - switch (type) { - case SparseIndexVector_Int32Vector: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case SparseIndexVector_Uint16Vector: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case SparseIndexVector_Uint8Vector: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - default: break; - } - value = nullptr; - type = SparseIndexVector_NONE; -} - -inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type) { - switch (type) { - case BuiltinOptions_NONE: { - return true; - } - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_L2NormOptions: { - auto ptr = 
reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(obj); - return 
verifier.VerifyTable(ptr); - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BidirectionalSequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FloorModOptions: { - auto ptr = reinterpret_cast(obj); - return 
verifier.VerifyTable(ptr); - } - case BuiltinOptions_RangeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ResizeNearestNeighborOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LeakyReluOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SquaredDifferenceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MirrorPadOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_AbsOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SplitVOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_UniqueOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ReverseV2Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_AddNOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GatherNdOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CosOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_WhereOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_RankOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ReverseSequenceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MatrixDiagOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_QuantizeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MatrixSetDiagOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_HardSwishOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_IfOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_WhileOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DepthToSpaceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_NonMaxSuppressionV4Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_NonMaxSuppressionV5Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ScatterNdOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SelectV2Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DensifyOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SegmentSumOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BatchMatMulOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CumsumOptions: { - auto ptr = reinterpret_cast(obj); - return 
verifier.VerifyTable(ptr); - } - case BuiltinOptions_CallOnceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BroadcastToOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_Rfft2dOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - default: return true; - } -} - -inline bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { - if (!values || !types) return !values && !types; - if (values->size() != types->size()) return false; - for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { - if (!VerifyBuiltinOptions( - verifier, values->Get(i), types->GetEnum(i))) { - return false; - } - } - return true; -} - -inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver) { - switch (type) { - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case 
BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = 
reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BidirectionalSequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FloorModOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_RangeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ResizeNearestNeighborOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LeakyReluOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SquaredDifferenceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MirrorPadOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_AbsOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SplitVOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_UniqueOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ReverseV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_AddNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GatherNdOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CosOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_WhereOptions: { - auto ptr = 
reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_RankOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ReverseSequenceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MatrixDiagOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_QuantizeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MatrixSetDiagOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_HardSwishOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_IfOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_WhileOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DepthToSpaceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_NonMaxSuppressionV4Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_NonMaxSuppressionV5Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ScatterNdOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SelectV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DensifyOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SegmentSumOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BatchMatMulOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CumsumOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CallOnceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BroadcastToOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_Rfft2dOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - default: return nullptr; - } -} - -inline flatbuffers::Offset BuiltinOptionsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { - switch (type) { - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(value); - return CreateConv2DOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(value); - return CreateDepthwiseConv2DOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(value); - return CreateConcatEmbeddingsOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(value); - return CreateLSHProjectionOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(value); - return CreatePool2DOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(value); - return CreateSVDFOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(value); - return 
CreateRNNOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(value); - return CreateFullyConnectedOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateSoftmaxOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(value); - return CreateConcatenationOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(value); - return CreateAddOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(value); - return CreateL2NormOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(value); - return CreateLocalResponseNormalizationOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(value); - return CreateLSTMOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(value); - return CreateResizeBilinearOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(value); - return CreateCallOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(value); - return CreateReshapeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(value); - return CreateSkipGramOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(value); - return CreateSpaceToDepthOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(value); - return CreateEmbeddingLookupSparseOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(value); - return CreateMulOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(value); - return CreatePadOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(value); - return CreateGatherOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(value); - return CreateBatchToSpaceNDOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(value); - return CreateSpaceToBatchNDOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(value); - return CreateTransposeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(value); - return CreateReducerOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(value); - return CreateSubOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(value); - return CreateDivOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(value); - return CreateSqueezeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - return 
CreateSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(value); - return CreateStridedSliceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(value); - return CreateExpOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(value); - return CreateTopKV2Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(value); - return CreateSplitOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogSoftmaxOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(value); - return CreateCastOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(value); - return CreateDequantizeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(value); - return CreateMaximumMinimumOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateArgMaxOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(value); - return CreateLessOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(value); - return CreateNegOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(value); - return CreatePadV2Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(value); - return CreateGreaterOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateGreaterEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateLessEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(value); - return CreateSelectOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(value); - return CreateSliceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(value); - return CreateTransposeConvOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(value); - return CreateSparseToDenseOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(value); - return CreateTileOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(value); - return CreateExpandDimsOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateNotEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(value); - return CreateShapeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PowOptions: { - 
auto ptr = reinterpret_cast(value); - return CreatePowOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(value); - return CreateArgMinOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(value); - return CreateFakeQuantOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(value); - return CreatePackOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalOrOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(value); - return CreateOneHotOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalAndOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalNotOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(value); - return CreateUnpackOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(value); - return CreateFloorDivOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(value); - return CreateSquareOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(value); - return CreateZerosLikeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(value); - return CreateFillOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(value); - return CreateBidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BidirectionalSequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - return CreateBidirectionalSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(value); - return CreateUnidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FloorModOptions: { - auto ptr = reinterpret_cast(value); - return CreateFloorModOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_RangeOptions: { - auto ptr = reinterpret_cast(value); - return CreateRangeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ResizeNearestNeighborOptions: { - auto ptr = reinterpret_cast(value); - return CreateResizeNearestNeighborOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LeakyReluOptions: { - auto ptr = reinterpret_cast(value); - return CreateLeakyReluOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SquaredDifferenceOptions: { - auto ptr = reinterpret_cast(value); - return CreateSquaredDifferenceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MirrorPadOptions: { - auto ptr = reinterpret_cast(value); - return CreateMirrorPadOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_AbsOptions: { - auto ptr = reinterpret_cast(value); - return CreateAbsOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SplitVOptions: { - auto ptr = reinterpret_cast(value); - return CreateSplitVOptions(_fbb, ptr, _rehasher).Union(); - } - case 
BuiltinOptions_UniqueOptions: { - auto ptr = reinterpret_cast(value); - return CreateUniqueOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReverseV2Options: { - auto ptr = reinterpret_cast(value); - return CreateReverseV2Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_AddNOptions: { - auto ptr = reinterpret_cast(value); - return CreateAddNOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_GatherNdOptions: { - auto ptr = reinterpret_cast(value); - return CreateGatherNdOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CosOptions: { - auto ptr = reinterpret_cast(value); - return CreateCosOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_WhereOptions: { - auto ptr = reinterpret_cast(value); - return CreateWhereOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_RankOptions: { - auto ptr = reinterpret_cast(value); - return CreateRankOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReverseSequenceOptions: { - auto ptr = reinterpret_cast(value); - return CreateReverseSequenceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MatrixDiagOptions: { - auto ptr = reinterpret_cast(value); - return CreateMatrixDiagOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_QuantizeOptions: { - auto ptr = reinterpret_cast(value); - return CreateQuantizeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MatrixSetDiagOptions: { - auto ptr = reinterpret_cast(value); - return CreateMatrixSetDiagOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_HardSwishOptions: { - auto ptr = reinterpret_cast(value); - return CreateHardSwishOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_IfOptions: { - auto ptr = reinterpret_cast(value); - return CreateIfOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_WhileOptions: { - auto ptr = reinterpret_cast(value); - return CreateWhileOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DepthToSpaceOptions: { - auto ptr = reinterpret_cast(value); - return CreateDepthToSpaceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_NonMaxSuppressionV4Options: { - auto ptr = reinterpret_cast(value); - return CreateNonMaxSuppressionV4Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_NonMaxSuppressionV5Options: { - auto ptr = reinterpret_cast(value); - return CreateNonMaxSuppressionV5Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ScatterNdOptions: { - auto ptr = reinterpret_cast(value); - return CreateScatterNdOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SelectV2Options: { - auto ptr = reinterpret_cast(value); - return CreateSelectV2Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DensifyOptions: { - auto ptr = reinterpret_cast(value); - return CreateDensifyOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SegmentSumOptions: { - auto ptr = reinterpret_cast(value); - return CreateSegmentSumOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BatchMatMulOptions: { - auto ptr = reinterpret_cast(value); - return CreateBatchMatMulOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CumsumOptions: { - auto ptr = reinterpret_cast(value); - return CreateCumsumOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CallOnceOptions: { - auto ptr = reinterpret_cast(value); - return CreateCallOnceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BroadcastToOptions: { - 
auto ptr = reinterpret_cast(value); - return CreateBroadcastToOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_Rfft2dOptions: { - auto ptr = reinterpret_cast(value); - return CreateRfft2dOptions(_fbb, ptr, _rehasher).Union(); - } - default: return 0; - } -} - -inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) { - switch (type) { - case BuiltinOptions_Conv2DOptions: { - value = new tflite::Conv2DOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_DepthwiseConv2DOptions: { - value = new tflite::DepthwiseConv2DOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - value = new tflite::ConcatEmbeddingsOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LSHProjectionOptions: { - value = new tflite::LSHProjectionOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_Pool2DOptions: { - value = new tflite::Pool2DOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SVDFOptions: { - value = new tflite::SVDFOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_RNNOptions: { - value = new tflite::RNNOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FullyConnectedOptions: { - value = new tflite::FullyConnectedOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SoftmaxOptions: { - value = new tflite::SoftmaxOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ConcatenationOptions: { - value = new tflite::ConcatenationOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_AddOptions: { - value = new tflite::AddOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_L2NormOptions: { - value = new tflite::L2NormOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - value = new tflite::LocalResponseNormalizationOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LSTMOptions: { - value = new tflite::LSTMOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ResizeBilinearOptions: { - value = new tflite::ResizeBilinearOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_CallOptions: { - value = new tflite::CallOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ReshapeOptions: { - value = new tflite::ReshapeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SkipGramOptions: { - value = new tflite::SkipGramOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SpaceToDepthOptions: { - value = new tflite::SpaceToDepthOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - value = new tflite::EmbeddingLookupSparseOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_MulOptions: { - value = new tflite::MulOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_PadOptions: { - value = new tflite::PadOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_GatherOptions: { - value = new tflite::GatherOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_BatchToSpaceNDOptions: { - value = new tflite::BatchToSpaceNDOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SpaceToBatchNDOptions: { - value = new tflite::SpaceToBatchNDOptionsT(*reinterpret_cast(u.value)); - 
break; - } - case BuiltinOptions_TransposeOptions: { - value = new tflite::TransposeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ReducerOptions: { - value = new tflite::ReducerOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SubOptions: { - value = new tflite::SubOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_DivOptions: { - value = new tflite::DivOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SqueezeOptions: { - value = new tflite::SqueezeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SequenceRNNOptions: { - value = new tflite::SequenceRNNOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_StridedSliceOptions: { - value = new tflite::StridedSliceOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ExpOptions: { - value = new tflite::ExpOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_TopKV2Options: { - value = new tflite::TopKV2OptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SplitOptions: { - value = new tflite::SplitOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LogSoftmaxOptions: { - value = new tflite::LogSoftmaxOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_CastOptions: { - value = new tflite::CastOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_DequantizeOptions: { - value = new tflite::DequantizeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_MaximumMinimumOptions: { - value = new tflite::MaximumMinimumOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ArgMaxOptions: { - value = new tflite::ArgMaxOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LessOptions: { - value = new tflite::LessOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_NegOptions: { - value = new tflite::NegOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_PadV2Options: { - value = new tflite::PadV2OptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_GreaterOptions: { - value = new tflite::GreaterOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_GreaterEqualOptions: { - value = new tflite::GreaterEqualOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LessEqualOptions: { - value = new tflite::LessEqualOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SelectOptions: { - value = new tflite::SelectOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SliceOptions: { - value = new tflite::SliceOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_TransposeConvOptions: { - value = new tflite::TransposeConvOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SparseToDenseOptions: { - value = new tflite::SparseToDenseOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_TileOptions: { - value = new tflite::TileOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ExpandDimsOptions: { - value = new tflite::ExpandDimsOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_EqualOptions: { - value = new tflite::EqualOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_NotEqualOptions: { - value = new tflite::NotEqualOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ShapeOptions: { - 
value = new tflite::ShapeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_PowOptions: { - value = new tflite::PowOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ArgMinOptions: { - value = new tflite::ArgMinOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FakeQuantOptions: { - value = new tflite::FakeQuantOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_PackOptions: { - value = new tflite::PackOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LogicalOrOptions: { - value = new tflite::LogicalOrOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_OneHotOptions: { - value = new tflite::OneHotOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LogicalAndOptions: { - value = new tflite::LogicalAndOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LogicalNotOptions: { - value = new tflite::LogicalNotOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_UnpackOptions: { - value = new tflite::UnpackOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FloorDivOptions: { - value = new tflite::FloorDivOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SquareOptions: { - value = new tflite::SquareOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ZerosLikeOptions: { - value = new tflite::ZerosLikeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FillOptions: { - value = new tflite::FillOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - value = new tflite::BidirectionalSequenceLSTMOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_BidirectionalSequenceRNNOptions: { - value = new tflite::BidirectionalSequenceRNNOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - value = new tflite::UnidirectionalSequenceLSTMOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FloorModOptions: { - value = new tflite::FloorModOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_RangeOptions: { - value = new tflite::RangeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ResizeNearestNeighborOptions: { - value = new tflite::ResizeNearestNeighborOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LeakyReluOptions: { - value = new tflite::LeakyReluOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SquaredDifferenceOptions: { - value = new tflite::SquaredDifferenceOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_MirrorPadOptions: { - value = new tflite::MirrorPadOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_AbsOptions: { - value = new tflite::AbsOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SplitVOptions: { - value = new tflite::SplitVOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_UniqueOptions: { - value = new tflite::UniqueOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ReverseV2Options: { - value = new tflite::ReverseV2OptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_AddNOptions: { - value = new tflite::AddNOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_GatherNdOptions: { - value = new 
tflite::GatherNdOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_CosOptions: { - value = new tflite::CosOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_WhereOptions: { - value = new tflite::WhereOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_RankOptions: { - value = new tflite::RankOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ReverseSequenceOptions: { - value = new tflite::ReverseSequenceOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_MatrixDiagOptions: { - value = new tflite::MatrixDiagOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_QuantizeOptions: { - value = new tflite::QuantizeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_MatrixSetDiagOptions: { - value = new tflite::MatrixSetDiagOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_HardSwishOptions: { - value = new tflite::HardSwishOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_IfOptions: { - value = new tflite::IfOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_WhileOptions: { - value = new tflite::WhileOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_DepthToSpaceOptions: { - value = new tflite::DepthToSpaceOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_NonMaxSuppressionV4Options: { - value = new tflite::NonMaxSuppressionV4OptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_NonMaxSuppressionV5Options: { - value = new tflite::NonMaxSuppressionV5OptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ScatterNdOptions: { - value = new tflite::ScatterNdOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SelectV2Options: { - value = new tflite::SelectV2OptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_DensifyOptions: { - value = new tflite::DensifyOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SegmentSumOptions: { - value = new tflite::SegmentSumOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_BatchMatMulOptions: { - value = new tflite::BatchMatMulOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_CumsumOptions: { - value = new tflite::CumsumOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_CallOnceOptions: { - value = new tflite::CallOnceOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_BroadcastToOptions: { - value = new tflite::BroadcastToOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_Rfft2dOptions: { - value = new tflite::Rfft2dOptionsT( - *reinterpret_cast(u.value)); - break; - } - default: - break; - } -} - -inline void BuiltinOptionsUnion::Reset() { - switch (type) { - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case 
BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ArgMaxOptions: { - 
auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_BidirectionalSequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_FloorModOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_RangeOptions: { - auto ptr = 
reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ResizeNearestNeighborOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LeakyReluOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SquaredDifferenceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_MirrorPadOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_AbsOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SplitVOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_UniqueOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ReverseV2Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_AddNOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_GatherNdOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_CosOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_WhereOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_RankOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ReverseSequenceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_MatrixDiagOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_QuantizeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_MatrixSetDiagOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_HardSwishOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_IfOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_WhileOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_DepthToSpaceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_NonMaxSuppressionV4Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_NonMaxSuppressionV5Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ScatterNdOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SelectV2Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_DensifyOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SegmentSumOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_BatchMatMulOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_CumsumOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_CallOnceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_BroadcastToOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_Rfft2dOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - default: break; - } - value = nullptr; - type = BuiltinOptions_NONE; -} 
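// The copy constructor and Reset() removed above follow the usual FlatBuffers
// object-API ownership pattern for a tagged union: copying switches on the type
// tag and deep-copies the payload with new, while Reset() switches on the same
// tag, deletes the payload, and returns the union to its NONE state. The
// stand-alone sketch below illustrates that same pattern with two hypothetical
// option types (FooOptionsT/BarOptionsT are illustrative, not schema types).
#include <cstdio>

namespace demo {

struct FooOptionsT { int stride; };
struct BarOptionsT { float alpha; };

enum Option { Option_NONE = 0, Option_Foo = 1, Option_Bar = 2 };

struct OptionsUnion {
  Option type;
  void *value;

  OptionsUnion() : type(Option_NONE), value(nullptr) {}
  // Deep copy: clone whichever payload the tag says we own
  // (copy assignment is omitted here for brevity).
  OptionsUnion(const OptionsUnion &u) : type(u.type), value(nullptr) {
    switch (type) {
      case Option_Foo: value = new FooOptionsT(*reinterpret_cast<const FooOptionsT *>(u.value)); break;
      case Option_Bar: value = new BarOptionsT(*reinterpret_cast<const BarOptionsT *>(u.value)); break;
      default: break;
    }
  }
  ~OptionsUnion() { Reset(); }
  // Release the owned payload and fall back to the NONE state.
  void Reset() {
    switch (type) {
      case Option_Foo: delete reinterpret_cast<FooOptionsT *>(value); break;
      case Option_Bar: delete reinterpret_cast<BarOptionsT *>(value); break;
      default: break;
    }
    value = nullptr;
    type = Option_NONE;
  }
};

}  // namespace demo

int main() {
  demo::OptionsUnion a;
  a.type = demo::Option_Foo;
  a.value = new demo::FooOptionsT{3};
  demo::OptionsUnion b = a;  // b gets its own deep-copied FooOptionsT
  std::printf("stride = %d\n", reinterpret_cast<demo::FooOptionsT *>(b.value)->stride);
  return 0;  // both destructors run Reset() and free their payloads
}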
- -inline const tflite::Model *GetModel(const void *buf) { - return flatbuffers::GetRoot(buf); -} - -inline const tflite::Model *GetSizePrefixedModel(const void *buf) { - return flatbuffers::GetSizePrefixedRoot(buf); -} - -inline const char *ModelIdentifier() { - return "TFL3"; -} - -inline bool ModelBufferHasIdentifier(const void *buf) { - return flatbuffers::BufferHasIdentifier( - buf, ModelIdentifier()); -} - -inline bool VerifyModelBuffer( - flatbuffers::Verifier &verifier) { - return verifier.VerifyBuffer(ModelIdentifier()); -} - -inline bool VerifySizePrefixedModelBuffer( - flatbuffers::Verifier &verifier) { - return verifier.VerifySizePrefixedBuffer(ModelIdentifier()); -} - -inline const char *ModelExtension() { - return "tflite"; -} - -inline void FinishModelBuffer( - flatbuffers::FlatBufferBuilder &fbb, - flatbuffers::Offset root) { - fbb.Finish(root, ModelIdentifier()); -} - -inline void FinishSizePrefixedModelBuffer( - flatbuffers::FlatBufferBuilder &fbb, - flatbuffers::Offset root) { - fbb.FinishSizePrefixed(root, ModelIdentifier()); -} - -inline std::unique_ptr UnPackModel( - const void *buf, - const flatbuffers::resolver_function_t *res = nullptr) { - return std::unique_ptr(GetModel(buf)->UnPack(res)); -} - -inline std::unique_ptr UnPackSizePrefixedModel( - const void *buf, - const flatbuffers::resolver_function_t *res = nullptr) { - return std::unique_ptr(GetSizePrefixedModel(buf)->UnPack(res)); -} - -} // namespace tflite - -#endif // FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/schema/schema_utils.cc b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/schema/schema_utils.cc deleted file mode 100644 index fc19290b..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/schema/schema_utils.cc +++ /dev/null @@ -1,62 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "tensorflow/lite/schema/schema_utils.h" - -#include - -#include "tensorflow/lite/kernels/internal/compatibility.h" - -namespace tflite { - -// The following GetBuiltinCode methods are the utility methods for reading -// builtin operatore code, ensuring compatibility issues between v3 and v3a -// schema. Always the maximum value of the two fields always will be the correct -// value as follows: -// -// - Supporting schema version v3 models -// -// The `builtin_code` field is not available in the v3 models. Flatbuffer -// library will feed zero value, which is the default value in the v3a schema. -// The actual builtin operatore code value will exist in the -// `deprecated_builtin_code` field. At the same time, it implies that -// `deprecated_builtin_code` >= `builtin_code` and the maximum value of the two -// fields will be same with `deprecated_builtin_code'. 
-// -// - Supporting builtin operator codes beyonds 127 -// -// New builtin operators, whose operator code is larger than 127, can not be -// assigned to the `deprecated_builtin_code` field. In such cases, the -// value of the `builtin_code` field should be used for the builtin operator -// code. In the case, the maximum value of the two fields will be the value of -// the `builtin_code` as the right value. - -BuiltinOperator GetBuiltinCode(const OperatorCode* op_code) { - // Caller should guarantee that the given argument value is not a nullptr. - TFLITE_DCHECK(op_code != nullptr); - - return std::max( - op_code->builtin_code(), - static_cast(op_code->deprecated_builtin_code())); -} - -BuiltinOperator GetBuiltinCode(const OperatorCodeT* op_code) { - // Caller should guarantee that the given argument value is not a nullptr. - TFLITE_DCHECK(op_code != nullptr); - - return std::max(op_code->builtin_code, static_cast( - op_code->deprecated_builtin_code)); -} - -} // namespace tflite diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/schema/schema_utils.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/schema/schema_utils.h deleted file mode 100644 index 453276b9..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/schema/schema_utils.h +++ /dev/null @@ -1,33 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_ -#define TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_ - -#include "flatbuffers/flatbuffers.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { - -// The following methods are introduced to resolve op builtin code shortage -// problem. The new builtin opreator will be assigned to the extended builtin -// code field in the flatbuffer schema. Those methods helps to hide builtin code -// details. -BuiltinOperator GetBuiltinCode(const OperatorCode *op_code); - -BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code); - -} // namespace tflite - -#endif // TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/version.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/version.h deleted file mode 100644 index f667447b..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/tensorflow/lite/version.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#ifndef TENSORFLOW_LITE_VERSION_H_ -#define TENSORFLOW_LITE_VERSION_H_ - -#include "tensorflow/core/public/version.h" - -// The version number of the Schema. Ideally all changes will be backward -// compatible. If that ever changes, we must ensure that version is the first -// entry in the new tflite root so that we can see that version is not 1. -#define TFLITE_SCHEMA_VERSION (3) - -// TensorFlow Lite Runtime version. -// This value is currently shared with that of TensorFlow. -#define TFLITE_VERSION_STRING TF_VERSION_STRING - -#endif // TENSORFLOW_LITE_VERSION_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/LICENSE.txt b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/LICENSE.txt deleted file mode 100644 index d6456956..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/base.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/base.h deleted file mode 100644 index 1b7f189a..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/base.h +++ /dev/null @@ -1,399 +0,0 @@ -#ifndef FLATBUFFERS_BASE_H_ -#define FLATBUFFERS_BASE_H_ - -// clang-format off - -// If activate should be declared and included first. -#if defined(FLATBUFFERS_MEMORY_LEAK_TRACKING) && \ - defined(_MSC_VER) && defined(_DEBUG) - // The _CRTDBG_MAP_ALLOC inside will replace - // calloc/free (etc) to its debug version using #define directives. - #define _CRTDBG_MAP_ALLOC - #include - #include - // Replace operator new by trace-enabled version. 
- #define DEBUG_NEW new(_NORMAL_BLOCK, __FILE__, __LINE__) - #define new DEBUG_NEW -#endif - -#if !defined(FLATBUFFERS_ASSERT) -#include -// #include FIX check assert -#define FLATBUFFERS_ASSERT assert -#elif defined(FLATBUFFERS_ASSERT_INCLUDE) -// Include file with forward declaration -#include FLATBUFFERS_ASSERT_INCLUDE -#endif - -#ifndef ARDUINO -#include -#endif - -#include -#include -#include - -#if defined(ARDUINO) && !defined(ARDUINOSTL_M_H) - #include -#else - #include -#endif - -#include -#include -#include -#include -#include -#include -#include - -#ifdef _STLPORT_VERSION - #define FLATBUFFERS_CPP98_STL -#endif -#ifndef FLATBUFFERS_CPP98_STL - #include -#endif - -#include "flatbuffers/stl_emulation.h" - -#if defined(__ICCARM__) -#include -#endif - -// Note the __clang__ check is needed, because clang presents itself -// as an older GNUC compiler (4.2). -// Clang 3.3 and later implement all of the ISO C++ 2011 standard. -// Clang 3.4 and later implement all of the ISO C++ 2014 standard. -// http://clang.llvm.org/cxx_status.html - -// Note the MSVC value '__cplusplus' may be incorrect: -// The '__cplusplus' predefined macro in the MSVC stuck at the value 199711L, -// indicating (erroneously!) that the compiler conformed to the C++98 Standard. -// This value should be correct starting from MSVC2017-15.7-Preview-3. -// The '__cplusplus' will be valid only if MSVC2017-15.7-P3 and the `/Zc:__cplusplus` switch is set. -// Workaround (for details see MSDN): -// Use the _MSC_VER and _MSVC_LANG definition instead of the __cplusplus for compatibility. -// The _MSVC_LANG macro reports the Standard version regardless of the '/Zc:__cplusplus' switch. - -#if defined(__GNUC__) && !defined(__clang__) - #define FLATBUFFERS_GCC (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) -#else - #define FLATBUFFERS_GCC 0 -#endif - -#if defined(__clang__) - #define FLATBUFFERS_CLANG (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) -#else - #define FLATBUFFERS_CLANG 0 -#endif - -/// @cond FLATBUFFERS_INTERNAL -#if __cplusplus <= 199711L && \ - (!defined(_MSC_VER) || _MSC_VER < 1600) && \ - (!defined(__GNUC__) || \ - (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40400)) - #error A C++11 compatible compiler with support for the auto typing is \ - required for FlatBuffers. - #error __cplusplus _MSC_VER __GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__ -#endif - -#if !defined(__clang__) && \ - defined(__GNUC__) && \ - (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__ < 40600) - // Backwards compatibility for g++ 4.4, and 4.5 which don't have the nullptr - // and constexpr keywords. Note the __clang__ check is needed, because clang - // presents itself as an older GNUC compiler. - #ifndef nullptr_t - const class nullptr_t { - public: - template inline operator T*() const { return 0; } - private: - void operator&() const; - } nullptr = {}; - #endif - #ifndef constexpr - #define constexpr const - #endif -#endif - -// The wire format uses a little endian encoding (since that's efficient for -// the common platforms). 
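// The detection block that follows picks FLATBUFFERS_LITTLEENDIAN at compile
// time because the FlatBuffers wire format is always little endian; on
// big-endian hosts every scalar load/store is routed through a byte swap
// (EndianSwap/EndianScalar further down in this header). A minimal,
// self-contained sketch of the same idea, using a runtime probe and a
// hand-rolled 32-bit swap (illustrative only, not the library's implementation):
#include <cstdint>
#include <cstdio>
#include <cstring>

static bool HostIsLittleEndian() {
  const uint32_t probe = 1;   // 0x00000001
  unsigned char first_byte;
  std::memcpy(&first_byte, &probe, 1);
  return first_byte == 1;     // low byte stored first => little endian
}

static uint32_t ByteSwap32(uint32_t v) {
  return (v >> 24) | ((v >> 8) & 0x0000FF00u) | ((v << 8) & 0x00FF0000u) | (v << 24);
}

// Convert a host-order scalar to (or from) little-endian wire order.
static uint32_t ToWireOrder(uint32_t host_value) {
  return HostIsLittleEndian() ? host_value : ByteSwap32(host_value);
}

int main() {
  std::printf("host little endian: %d\n", HostIsLittleEndian() ? 1 : 0);
  std::printf("0x11223344 in wire order: 0x%08X\n", ToWireOrder(0x11223344u));
  return 0;
}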
-#if defined(__s390x__) - #define FLATBUFFERS_LITTLEENDIAN 0 -#endif // __s390x__ -#if !defined(FLATBUFFERS_LITTLEENDIAN) - #if defined(__GNUC__) || defined(__clang__) || defined(__ICCARM__) - #if (defined(__BIG_ENDIAN__) || \ - (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)) - #define FLATBUFFERS_LITTLEENDIAN 0 - #else - #define FLATBUFFERS_LITTLEENDIAN 1 - #endif // __BIG_ENDIAN__ - #elif defined(_MSC_VER) - #if defined(_M_PPC) - #define FLATBUFFERS_LITTLEENDIAN 0 - #else - #define FLATBUFFERS_LITTLEENDIAN 1 - #endif - #else - #error Unable to determine endianness, define FLATBUFFERS_LITTLEENDIAN. - #endif -#endif // !defined(FLATBUFFERS_LITTLEENDIAN) - -#define FLATBUFFERS_VERSION_MAJOR 1 -#define FLATBUFFERS_VERSION_MINOR 12 -#define FLATBUFFERS_VERSION_REVISION 0 -#define FLATBUFFERS_STRING_EXPAND(X) #X -#define FLATBUFFERS_STRING(X) FLATBUFFERS_STRING_EXPAND(X) -namespace flatbuffers { - // Returns version as string "MAJOR.MINOR.REVISION". - const char* FLATBUFFERS_VERSION(); -} - -#if (!defined(_MSC_VER) || _MSC_VER > 1600) && \ - (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 407)) || \ - defined(__clang__) - #define FLATBUFFERS_FINAL_CLASS final - #define FLATBUFFERS_OVERRIDE override - #define FLATBUFFERS_VTABLE_UNDERLYING_TYPE : flatbuffers::voffset_t -#else - #define FLATBUFFERS_FINAL_CLASS - #define FLATBUFFERS_OVERRIDE - #define FLATBUFFERS_VTABLE_UNDERLYING_TYPE -#endif - -#if (!defined(_MSC_VER) || _MSC_VER >= 1900) && \ - (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 406)) || \ - (defined(__cpp_constexpr) && __cpp_constexpr >= 200704) - #define FLATBUFFERS_CONSTEXPR constexpr -#else - #define FLATBUFFERS_CONSTEXPR const -#endif - -#if (defined(__cplusplus) && __cplusplus >= 201402L) || \ - (defined(__cpp_constexpr) && __cpp_constexpr >= 201304) - #define FLATBUFFERS_CONSTEXPR_CPP14 FLATBUFFERS_CONSTEXPR -#else - #define FLATBUFFERS_CONSTEXPR_CPP14 -#endif - -#if (defined(__GXX_EXPERIMENTAL_CXX0X__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 406)) || \ - (defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 190023026)) || \ - defined(__clang__) - #define FLATBUFFERS_NOEXCEPT noexcept -#else - #define FLATBUFFERS_NOEXCEPT -#endif - -// NOTE: the FLATBUFFERS_DELETE_FUNC macro may change the access mode to -// private, so be sure to put it at the end or reset access mode explicitly. 
-#if (!defined(_MSC_VER) || _MSC_FULL_VER >= 180020827) && \ - (!defined(__GNUC__) || (__GNUC__ * 100 + __GNUC_MINOR__ >= 404)) || \ - defined(__clang__) - #define FLATBUFFERS_DELETE_FUNC(func) func = delete; -#else - #define FLATBUFFERS_DELETE_FUNC(func) private: func; -#endif - -#ifndef FLATBUFFERS_HAS_STRING_VIEW - // Only provide flatbuffers::string_view if __has_include can be used - // to detect a header that provides an implementation - #if defined(__has_include) - // Check for std::string_view (in c++17) - #if __has_include() && (__cplusplus >= 201606 || (defined(_HAS_CXX17) && _HAS_CXX17)) - #include - namespace flatbuffers { - typedef std::string_view string_view; - } - #define FLATBUFFERS_HAS_STRING_VIEW 1 - // Check for std::experimental::string_view (in c++14, compiler-dependent) - #elif __has_include() && (__cplusplus >= 201411) - #include - namespace flatbuffers { - typedef std::experimental::string_view string_view; - } - #define FLATBUFFERS_HAS_STRING_VIEW 1 - // Check for absl::string_view - #elif __has_include("absl/strings/string_view.h") - #include "absl/strings/string_view.h" - namespace flatbuffers { - typedef absl::string_view string_view; - } - #define FLATBUFFERS_HAS_STRING_VIEW 1 - #endif - #endif // __has_include -#endif // !FLATBUFFERS_HAS_STRING_VIEW - -#ifndef FLATBUFFERS_HAS_NEW_STRTOD - // Modern (C++11) strtod and strtof functions are available for use. - // 1) nan/inf strings as argument of strtod; - // 2) hex-float as argument of strtod/strtof. - #if (defined(_MSC_VER) && _MSC_VER >= 1900) || \ - (defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409)) || \ - (defined(__clang__)) - #define FLATBUFFERS_HAS_NEW_STRTOD 1 - #endif -#endif // !FLATBUFFERS_HAS_NEW_STRTOD - -#ifndef FLATBUFFERS_LOCALE_INDEPENDENT - // Enable locale independent functions {strtof_l, strtod_l,strtoll_l, strtoull_l}. - // They are part of the POSIX-2008 but not part of the C/C++ standard. - // GCC/Clang have definition (_XOPEN_SOURCE>=700) if POSIX-2008. - #if ((defined(_MSC_VER) && _MSC_VER >= 1800) || \ - (defined(_XOPEN_SOURCE) && (_XOPEN_SOURCE>=700))) - #define FLATBUFFERS_LOCALE_INDEPENDENT 1 - #else - #define FLATBUFFERS_LOCALE_INDEPENDENT 0 - #endif -#endif // !FLATBUFFERS_LOCALE_INDEPENDENT - -// Suppress Undefined Behavior Sanitizer (recoverable only). Usage: -// - __supress_ubsan__("undefined") -// - __supress_ubsan__("signed-integer-overflow") -#if defined(__clang__) && (__clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >=7)) - #define __supress_ubsan__(type) __attribute__((no_sanitize(type))) -#elif defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 409) - #define __supress_ubsan__(type) __attribute__((no_sanitize_undefined)) -#else - #define __supress_ubsan__(type) -#endif - -// This is constexpr function used for checking compile-time constants. -// Avoid `#pragma warning(disable: 4127) // C4127: expression is constant`. -template FLATBUFFERS_CONSTEXPR inline bool IsConstTrue(T t) { - return !!t; -} - -// Enable C++ attribute [[]] if std:c++17 or higher. -#if ((__cplusplus >= 201703L) \ - || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L))) - // All attributes unknown to an implementation are ignored without causing an error. 
- #define FLATBUFFERS_ATTRIBUTE(attr) [[attr]] - - #define FLATBUFFERS_FALLTHROUGH() [[fallthrough]] -#else - #define FLATBUFFERS_ATTRIBUTE(attr) - - #if FLATBUFFERS_CLANG >= 30800 - #define FLATBUFFERS_FALLTHROUGH() [[clang::fallthrough]] - #elif FLATBUFFERS_GCC >= 70300 - #define FLATBUFFERS_FALLTHROUGH() [[gnu::fallthrough]] - #else - #define FLATBUFFERS_FALLTHROUGH() - #endif -#endif - -/// @endcond - -/// @file -namespace flatbuffers { - -/// @cond FLATBUFFERS_INTERNAL -// Our default offset / size type, 32bit on purpose on 64bit systems. -// Also, using a consistent offset type maintains compatibility of serialized -// offset values between 32bit and 64bit systems. -typedef uint32_t uoffset_t; - -// Signed offsets for references that can go in both directions. -typedef int32_t soffset_t; - -// Offset/index used in v-tables, can be changed to uint8_t in -// format forks to save a bit of space if desired. -typedef uint16_t voffset_t; - -typedef uintmax_t largest_scalar_t; - -// In 32bits, this evaluates to 2GB - 1 -#define FLATBUFFERS_MAX_BUFFER_SIZE ((1ULL << (sizeof(::flatbuffers::soffset_t) * 8 - 1)) - 1) - -// We support aligning the contents of buffers up to this size. -#define FLATBUFFERS_MAX_ALIGNMENT 16 - -#if defined(_MSC_VER) - #pragma warning(push) - #pragma warning(disable: 4127) // C4127: conditional expression is constant -#endif - -template T EndianSwap(T t) { - #if defined(_MSC_VER) - #define FLATBUFFERS_BYTESWAP16 _byteswap_ushort - #define FLATBUFFERS_BYTESWAP32 _byteswap_ulong - #define FLATBUFFERS_BYTESWAP64 _byteswap_uint64 - #elif defined(__ICCARM__) - #define FLATBUFFERS_BYTESWAP16 __REV16 - #define FLATBUFFERS_BYTESWAP32 __REV - #define FLATBUFFERS_BYTESWAP64(x) \ - ((__REV(static_cast(x >> 32U))) | (static_cast(__REV(static_cast(x)))) << 32U) - #else - #if defined(__GNUC__) && __GNUC__ * 100 + __GNUC_MINOR__ < 408 && !defined(__clang__) - // __builtin_bswap16 was missing prior to GCC 4.8. - #define FLATBUFFERS_BYTESWAP16(x) \ - static_cast(__builtin_bswap32(static_cast(x) << 16)) - #else - #define FLATBUFFERS_BYTESWAP16 __builtin_bswap16 - #endif - #define FLATBUFFERS_BYTESWAP32 __builtin_bswap32 - #define FLATBUFFERS_BYTESWAP64 __builtin_bswap64 - #endif - if (sizeof(T) == 1) { // Compile-time if-then's. - return t; - } else if (sizeof(T) == 2) { - union { T t; uint16_t i; } u = { t }; - u.i = FLATBUFFERS_BYTESWAP16(u.i); - return u.t; - } else if (sizeof(T) == 4) { - union { T t; uint32_t i; } u = { t }; - u.i = FLATBUFFERS_BYTESWAP32(u.i); - return u.t; - } else if (sizeof(T) == 8) { - union { T t; uint64_t i; } u = { t }; - u.i = FLATBUFFERS_BYTESWAP64(u.i); - return u.t; - } else { - FLATBUFFERS_ASSERT(0); - return t; - } -} - -#if defined(_MSC_VER) - #pragma warning(pop) -#endif - - -template T EndianScalar(T t) { - #if FLATBUFFERS_LITTLEENDIAN - return t; - #else - return EndianSwap(t); - #endif -} - -template -// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details. -__supress_ubsan__("alignment") -T ReadScalar(const void *p) { - return EndianScalar(*reinterpret_cast(p)); -} - -template -// UBSAN: C++ aliasing type rules, see std::bit_cast<> for details. 
-__supress_ubsan__("alignment") -void WriteScalar(void *p, T t) { - *reinterpret_cast(p) = EndianScalar(t); -} - -template struct Offset; -template __supress_ubsan__("alignment") void WriteScalar(void *p, Offset t) { - *reinterpret_cast(p) = EndianScalar(t.o); -} - -// Computes how many bytes you'd have to pad to be able to write an -// "scalar_size" scalar if the buffer had grown to "buf_size" (downwards in -// memory). -__supress_ubsan__("unsigned-integer-overflow") -inline size_t PaddingBytes(size_t buf_size, size_t scalar_size) { - return ((~buf_size) + 1) & (scalar_size - 1); -} - -} // namespace flatbuffers -#endif // FLATBUFFERS_BASE_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/flatbuffers.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/flatbuffers.h deleted file mode 100644 index 332cb5bc..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/flatbuffers.h +++ /dev/null @@ -1,2783 +0,0 @@ -/* - * Copyright 2014 Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef FLATBUFFERS_H_ -#define FLATBUFFERS_H_ - -#include "flatbuffers/base.h" - -#if defined(FLATBUFFERS_NAN_DEFAULTS) -# include -#endif - -namespace flatbuffers { -// Generic 'operator==' with conditional specialisations. -// T e - new value of a scalar field. -// T def - default of scalar (is known at compile-time). -template inline bool IsTheSameAs(T e, T def) { return e == def; } - -#if defined(FLATBUFFERS_NAN_DEFAULTS) && \ - defined(FLATBUFFERS_HAS_NEW_STRTOD) && (FLATBUFFERS_HAS_NEW_STRTOD > 0) -// Like `operator==(e, def)` with weak NaN if T=(float|double). -template inline bool IsFloatTheSameAs(T e, T def) { - return (e == def) || ((def != def) && (e != e)); -} -template<> inline bool IsTheSameAs(float e, float def) { - return IsFloatTheSameAs(e, def); -} -template<> inline bool IsTheSameAs(double e, double def) { - return IsFloatTheSameAs(e, def); -} -#endif - -// Check 'v' is out of closed range [low; high]. -// Workaround for GCC warning [-Werror=type-limits]: -// comparison is always true due to limited range of data type. -template -inline bool IsOutRange(const T &v, const T &low, const T &high) { - return (v < low) || (high < v); -} - -// Check 'v' is in closed range [low; high]. -template -inline bool IsInRange(const T &v, const T &low, const T &high) { - return !IsOutRange(v, low, high); -} - -// Wrapper for uoffset_t to allow safe template specialization. -// Value is allowed to be 0 to indicate a null object (see e.g. AddOffset). -template struct Offset { - uoffset_t o; - Offset() : o(0) {} - Offset(uoffset_t _o) : o(_o) {} - Offset Union() const { return Offset(o); } - bool IsNull() const { return !o; } -}; - -inline void EndianCheck() { - int endiantest = 1; - // If this fails, see FLATBUFFERS_LITTLEENDIAN above. 
- FLATBUFFERS_ASSERT(*reinterpret_cast(&endiantest) == - FLATBUFFERS_LITTLEENDIAN); - (void)endiantest; -} - -template FLATBUFFERS_CONSTEXPR size_t AlignOf() { - // clang-format off - #ifdef _MSC_VER - return __alignof(T); - #else - #ifndef alignof - return __alignof__(T); - #else - return alignof(T); - #endif - #endif - // clang-format on -} - -// When we read serialized data from memory, in the case of most scalars, -// we want to just read T, but in the case of Offset, we want to actually -// perform the indirection and return a pointer. -// The template specialization below does just that. -// It is wrapped in a struct since function templates can't overload on the -// return type like this. -// The typedef is for the convenience of callers of this function -// (avoiding the need for a trailing return decltype) -template struct IndirectHelper { - typedef T return_type; - typedef T mutable_return_type; - static const size_t element_stride = sizeof(T); - static return_type Read(const uint8_t *p, uoffset_t i) { - return EndianScalar((reinterpret_cast(p))[i]); - } -}; -template struct IndirectHelper> { - typedef const T *return_type; - typedef T *mutable_return_type; - static const size_t element_stride = sizeof(uoffset_t); - static return_type Read(const uint8_t *p, uoffset_t i) { - p += i * sizeof(uoffset_t); - return reinterpret_cast(p + ReadScalar(p)); - } -}; -template struct IndirectHelper { - typedef const T *return_type; - typedef T *mutable_return_type; - static const size_t element_stride = sizeof(T); - static return_type Read(const uint8_t *p, uoffset_t i) { - return reinterpret_cast(p + i * sizeof(T)); - } -}; - -// An STL compatible iterator implementation for Vector below, effectively -// calling Get() for every element. -template struct VectorIterator { - typedef std::random_access_iterator_tag iterator_category; - typedef IT value_type; - typedef ptrdiff_t difference_type; - typedef IT *pointer; - typedef IT &reference; - - VectorIterator(const uint8_t *data, uoffset_t i) - : data_(data + IndirectHelper::element_stride * i) {} - VectorIterator(const VectorIterator &other) : data_(other.data_) {} - VectorIterator() : data_(nullptr) {} - - VectorIterator &operator=(const VectorIterator &other) { - data_ = other.data_; - return *this; - } - - // clang-format off - #if !defined(FLATBUFFERS_CPP98_STL) - VectorIterator &operator=(VectorIterator &&other) { - data_ = other.data_; - return *this; - } - #endif // !defined(FLATBUFFERS_CPP98_STL) - // clang-format on - - bool operator==(const VectorIterator &other) const { - return data_ == other.data_; - } - - bool operator<(const VectorIterator &other) const { - return data_ < other.data_; - } - - bool operator!=(const VectorIterator &other) const { - return data_ != other.data_; - } - - difference_type operator-(const VectorIterator &other) const { - return (data_ - other.data_) / IndirectHelper::element_stride; - } - - IT operator*() const { return IndirectHelper::Read(data_, 0); } - - IT operator->() const { return IndirectHelper::Read(data_, 0); } - - VectorIterator &operator++() { - data_ += IndirectHelper::element_stride; - return *this; - } - - VectorIterator operator++(int) { - VectorIterator temp(data_, 0); - data_ += IndirectHelper::element_stride; - return temp; - } - - VectorIterator operator+(const uoffset_t &offset) const { - return VectorIterator(data_ + offset * IndirectHelper::element_stride, - 0); - } - - VectorIterator &operator+=(const uoffset_t &offset) { - data_ += offset * IndirectHelper::element_stride; - 
return *this; - } - - VectorIterator &operator--() { - data_ -= IndirectHelper::element_stride; - return *this; - } - - VectorIterator operator--(int) { - VectorIterator temp(data_, 0); - data_ -= IndirectHelper::element_stride; - return temp; - } - - VectorIterator operator-(const uoffset_t &offset) const { - return VectorIterator(data_ - offset * IndirectHelper::element_stride, - 0); - } - - VectorIterator &operator-=(const uoffset_t &offset) { - data_ -= offset * IndirectHelper::element_stride; - return *this; - } - - private: - const uint8_t *data_; -}; - -template -struct VectorReverseIterator : public std::reverse_iterator { - explicit VectorReverseIterator(Iterator iter) - : std::reverse_iterator(iter) {} - - typename Iterator::value_type operator*() const { - return *(std::reverse_iterator::current); - } - - typename Iterator::value_type operator->() const { - return *(std::reverse_iterator::current); - } -}; - -struct String; - -// This is used as a helper type for accessing vectors. -// Vector::data() assumes the vector elements start after the length field. -template class Vector { - public: - typedef VectorIterator::mutable_return_type> - iterator; - typedef VectorIterator::return_type> - const_iterator; - typedef VectorReverseIterator reverse_iterator; - typedef VectorReverseIterator const_reverse_iterator; - - uoffset_t size() const { return EndianScalar(length_); } - - // Deprecated: use size(). Here for backwards compatibility. - FLATBUFFERS_ATTRIBUTE(deprecated("use size() instead")) - uoffset_t Length() const { return size(); } - - typedef typename IndirectHelper::return_type return_type; - typedef typename IndirectHelper::mutable_return_type mutable_return_type; - - return_type Get(uoffset_t i) const { - FLATBUFFERS_ASSERT(i < size()); - return IndirectHelper::Read(Data(), i); - } - - return_type operator[](uoffset_t i) const { return Get(i); } - - // If this is a Vector of enums, T will be its storage type, not the enum - // type. This function makes it convenient to retrieve value with enum - // type E. - template E GetEnum(uoffset_t i) const { - return static_cast(Get(i)); - } - - // If this a vector of unions, this does the cast for you. There's no check - // to make sure this is the right type! - template const U *GetAs(uoffset_t i) const { - return reinterpret_cast(Get(i)); - } - - // If this a vector of unions, this does the cast for you. There's no check - // to make sure this is actually a string! - const String *GetAsString(uoffset_t i) const { - return reinterpret_cast(Get(i)); - } - - const void *GetStructFromOffset(size_t o) const { - return reinterpret_cast(Data() + o); - } - - iterator begin() { return iterator(Data(), 0); } - const_iterator begin() const { return const_iterator(Data(), 0); } - - iterator end() { return iterator(Data(), size()); } - const_iterator end() const { return const_iterator(Data(), size()); } - - reverse_iterator rbegin() { return reverse_iterator(end() - 1); } - const_reverse_iterator rbegin() const { - return const_reverse_iterator(end() - 1); - } - - reverse_iterator rend() { return reverse_iterator(begin() - 1); } - const_reverse_iterator rend() const { - return const_reverse_iterator(begin() - 1); - } - - const_iterator cbegin() const { return begin(); } - - const_iterator cend() const { return end(); } - - const_reverse_iterator crbegin() const { return rbegin(); } - - const_reverse_iterator crend() const { return rend(); } - - // Change elements if you have a non-const pointer to this object. - // Scalars only. 
See reflection.h, and the documentation. - void Mutate(uoffset_t i, const T &val) { - FLATBUFFERS_ASSERT(i < size()); - WriteScalar(data() + i, val); - } - - // Change an element of a vector of tables (or strings). - // "val" points to the new table/string, as you can obtain from - // e.g. reflection::AddFlatBuffer(). - void MutateOffset(uoffset_t i, const uint8_t *val) { - FLATBUFFERS_ASSERT(i < size()); - static_assert(sizeof(T) == sizeof(uoffset_t), "Unrelated types"); - WriteScalar(data() + i, - static_cast(val - (Data() + i * sizeof(uoffset_t)))); - } - - // Get a mutable pointer to tables/strings inside this vector. - mutable_return_type GetMutableObject(uoffset_t i) const { - FLATBUFFERS_ASSERT(i < size()); - return const_cast(IndirectHelper::Read(Data(), i)); - } - - // The raw data in little endian format. Use with care. - const uint8_t *Data() const { - return reinterpret_cast(&length_ + 1); - } - - uint8_t *Data() { return reinterpret_cast(&length_ + 1); } - - // Similarly, but typed, much like std::vector::data - const T *data() const { return reinterpret_cast(Data()); } - T *data() { return reinterpret_cast(Data()); } - - template return_type LookupByKey(K key) const { - void *search_result = std::bsearch( - &key, Data(), size(), IndirectHelper::element_stride, KeyCompare); - - if (!search_result) { - return nullptr; // Key not found. - } - - const uint8_t *element = reinterpret_cast(search_result); - - return IndirectHelper::Read(element, 0); - } - - protected: - // This class is only used to access pre-existing data. Don't ever - // try to construct these manually. - Vector(); - - uoffset_t length_; - - private: - // This class is a pointer. Copying will therefore create an invalid object. - // Private and unimplemented copy constructor. - Vector(const Vector &); - Vector &operator=(const Vector &); - - template static int KeyCompare(const void *ap, const void *bp) { - const K *key = reinterpret_cast(ap); - const uint8_t *data = reinterpret_cast(bp); - auto table = IndirectHelper::Read(data, 0); - - // std::bsearch compares with the operands transposed, so we negate the - // result here. - return -table->KeyCompareWithValue(*key); - } -}; - -// Represent a vector much like the template above, but in this case we -// don't know what the element types are (used with reflection.h). -class VectorOfAny { - public: - uoffset_t size() const { return EndianScalar(length_); } - - const uint8_t *Data() const { - return reinterpret_cast(&length_ + 1); - } - uint8_t *Data() { return reinterpret_cast(&length_ + 1); } - - protected: - VectorOfAny(); - - uoffset_t length_; - - private: - VectorOfAny(const VectorOfAny &); - VectorOfAny &operator=(const VectorOfAny &); -}; - -#ifndef FLATBUFFERS_CPP98_STL -template -Vector> *VectorCast(Vector> *ptr) { - static_assert(std::is_base_of::value, "Unrelated types"); - return reinterpret_cast> *>(ptr); -} - -template -const Vector> *VectorCast(const Vector> *ptr) { - static_assert(std::is_base_of::value, "Unrelated types"); - return reinterpret_cast> *>(ptr); -} -#endif - -// Convenient helper function to get the length of any vector, regardless -// of whether it is null or not (the field is not set). -template static inline size_t VectorLength(const Vector *v) { - return v ? v->size() : 0; -} - -// This is used as a helper type for accessing arrays. 
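// Minimal read-access sketch for the Vector<T> helper above. The generated
// table `MyGame::Monster` and its `inventory()` accessor (a `[ubyte]` field)
// are assumed for illustration and are not part of this header.
#include "flatbuffers/flatbuffers.h"

inline size_t SumInventory(const MyGame::Monster *monster) {
  // A vector field that was never set comes back as nullptr; VectorLength()
  // maps that case to 0.
  const flatbuffers::Vector<uint8_t> *inv = monster->inventory();
  size_t total = 0;
  if (flatbuffers::VectorLength(inv) == 0) return total;
  for (flatbuffers::uoffset_t i = 0; i < inv->size(); i++) {
    total += inv->Get(i);  // Get() asserts i < size(); operator[] forwards to it
  }
  return total;
}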
-template class Array { - typedef - typename flatbuffers::integral_constant::value> - scalar_tag; - typedef - typename flatbuffers::conditional::type - IndirectHelperType; - - public: - typedef typename IndirectHelper::return_type return_type; - typedef VectorIterator const_iterator; - typedef VectorReverseIterator const_reverse_iterator; - - FLATBUFFERS_CONSTEXPR uint16_t size() const { return length; } - - return_type Get(uoffset_t i) const { - FLATBUFFERS_ASSERT(i < size()); - return IndirectHelper::Read(Data(), i); - } - - return_type operator[](uoffset_t i) const { return Get(i); } - - // If this is a Vector of enums, T will be its storage type, not the enum - // type. This function makes it convenient to retrieve value with enum - // type E. - template E GetEnum(uoffset_t i) const { - return static_cast(Get(i)); - } - - const_iterator begin() const { return const_iterator(Data(), 0); } - const_iterator end() const { return const_iterator(Data(), size()); } - - const_reverse_iterator rbegin() const { - return const_reverse_iterator(end()); - } - const_reverse_iterator rend() const { return const_reverse_iterator(end()); } - - const_iterator cbegin() const { return begin(); } - const_iterator cend() const { return end(); } - - const_reverse_iterator crbegin() const { return rbegin(); } - const_reverse_iterator crend() const { return rend(); } - - // Get a mutable pointer to elements inside this array. - // This method used to mutate arrays of structs followed by a @p Mutate - // operation. For primitive types use @p Mutate directly. - // @warning Assignments and reads to/from the dereferenced pointer are not - // automatically converted to the correct endianness. - typename flatbuffers::conditional::type - GetMutablePointer(uoffset_t i) const { - FLATBUFFERS_ASSERT(i < size()); - return const_cast(&data()[i]); - } - - // Change elements if you have a non-const pointer to this object. - void Mutate(uoffset_t i, const T &val) { MutateImpl(scalar_tag(), i, val); } - - // The raw data in little endian format. Use with care. - const uint8_t *Data() const { return data_; } - - uint8_t *Data() { return data_; } - - // Similarly, but typed, much like std::vector::data - const T *data() const { return reinterpret_cast(Data()); } - T *data() { return reinterpret_cast(Data()); } - - protected: - void MutateImpl(flatbuffers::integral_constant, uoffset_t i, - const T &val) { - FLATBUFFERS_ASSERT(i < size()); - WriteScalar(data() + i, val); - } - - void MutateImpl(flatbuffers::integral_constant, uoffset_t i, - const T &val) { - *(GetMutablePointer(i)) = val; - } - - // This class is only used to access pre-existing data. Don't ever - // try to construct these manually. - // 'constexpr' allows us to use 'size()' at compile time. - // @note Must not use 'FLATBUFFERS_CONSTEXPR' here, as const is not allowed on - // a constructor. -#if defined(__cpp_constexpr) - constexpr Array(); -#else - Array(); -#endif - - uint8_t data_[length * sizeof(T)]; - - private: - // This class is a pointer. Copying will therefore create an invalid object. - // Private and unimplemented copy constructor. - Array(const Array &); - Array &operator=(const Array &); -}; - -// Specialization for Array[struct] with access using Offset pointer. -// This specialization used by idl_gen_text.cpp. 
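// Sketch of the fixed-length Array<T, length> accessor above, assuming a
// schema struct such as `struct Vec3 { v:[float:3]; }` whose generated class
// exposes `v()` / `mutable_v()`; those names are illustrative only.
#include "flatbuffers/flatbuffers.h"

inline float SumComponents(MyGame::Vec3 *vec) {
  const flatbuffers::Array<float, 3> *a = vec->v();
  float total = 0.0f;
  for (flatbuffers::uoffset_t i = 0; i < a->size(); i++) total += a->Get(i);
  // Scalar elements can be changed in place through a non-const pointer:
  vec->mutable_v()->Mutate(0, total);
  return total;
}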
-template class Array, length> { - static_assert(flatbuffers::is_same::value, "unexpected type T"); - - public: - typedef const void *return_type; - - const uint8_t *Data() const { return data_; } - - // Make idl_gen_text.cpp::PrintContainer happy. - return_type operator[](uoffset_t) const { - FLATBUFFERS_ASSERT(false); - return nullptr; - } - - private: - // This class is only used to access pre-existing data. - Array(); - Array(const Array &); - Array &operator=(const Array &); - - uint8_t data_[1]; -}; - -// Lexicographically compare two strings (possibly containing nulls), and -// return true if the first is less than the second. -static inline bool StringLessThan(const char *a_data, uoffset_t a_size, - const char *b_data, uoffset_t b_size) { - const auto cmp = memcmp(a_data, b_data, (std::min)(a_size, b_size)); - return cmp == 0 ? a_size < b_size : cmp < 0; -} - -struct String : public Vector { - const char *c_str() const { return reinterpret_cast(Data()); } - std::string str() const { return std::string(c_str(), size()); } - - // clang-format off - #ifdef FLATBUFFERS_HAS_STRING_VIEW - flatbuffers::string_view string_view() const { - return flatbuffers::string_view(c_str(), size()); - } - #endif // FLATBUFFERS_HAS_STRING_VIEW - // clang-format on - - bool operator<(const String &o) const { - return StringLessThan(this->data(), this->size(), o.data(), o.size()); - } -}; - -// Convenience function to get std::string from a String returning an empty -// string on null pointer. -static inline std::string GetString(const String *str) { - return str ? str->str() : ""; -} - -// Convenience function to get char* from a String returning an empty string on -// null pointer. -static inline const char *GetCstring(const String *str) { - return str ? str->c_str() : ""; -} - -// Allocator interface. This is flatbuffers-specific and meant only for -// `vector_downward` usage. -class Allocator { - public: - virtual ~Allocator() {} - - // Allocate `size` bytes of memory. - virtual uint8_t *allocate(size_t size) = 0; - - // Deallocate `size` bytes of memory at `p` allocated by this allocator. - virtual void deallocate(uint8_t *p, size_t size) = 0; - - // Reallocate `new_size` bytes of memory, replacing the old region of size - // `old_size` at `p`. In contrast to a normal realloc, this grows downwards, - // and is intended specifcally for `vector_downward` use. - // `in_use_back` and `in_use_front` indicate how much of `old_size` is - // actually in use at each end, and needs to be copied. - virtual uint8_t *reallocate_downward(uint8_t *old_p, size_t old_size, - size_t new_size, size_t in_use_back, - size_t in_use_front) { - FLATBUFFERS_ASSERT(new_size > old_size); // vector_downward only grows - uint8_t *new_p = allocate(new_size); - memcpy_downward(old_p, old_size, new_p, new_size, in_use_back, - in_use_front); - deallocate(old_p, old_size); - return new_p; - } - - protected: - // Called by `reallocate_downward` to copy memory from `old_p` of `old_size` - // to `new_p` of `new_size`. Only memory of size `in_use_front` and - // `in_use_back` will be copied from the front and back of the old memory - // allocation. 
-  void memcpy_downward(uint8_t *old_p, size_t old_size, uint8_t *new_p,
-                       size_t new_size, size_t in_use_back,
-                       size_t in_use_front) {
-    memcpy(new_p + new_size - in_use_back, old_p + old_size - in_use_back,
-           in_use_back);
-    memcpy(new_p, old_p, in_use_front);
-  }
-};
-
-// DefaultAllocator uses new/delete to allocate memory regions
-class DefaultAllocator : public Allocator {
- public:
-  uint8_t *allocate(size_t size) FLATBUFFERS_OVERRIDE {
-    return new uint8_t[size];
-  }
-
-  void deallocate(uint8_t *p, size_t) FLATBUFFERS_OVERRIDE { delete[] p; }
-
-  static void dealloc(void *p, size_t) { delete[] static_cast<uint8_t *>(p); }
-};
-
-// These functions allow for a null allocator to mean use the default allocator,
-// as used by DetachedBuffer and vector_downward below.
-// This is to avoid having a statically or dynamically allocated default
-// allocator, or having to move it between the classes that may own it.
-inline uint8_t *Allocate(Allocator *allocator, size_t size) {
-  return allocator ? allocator->allocate(size)
-                   : DefaultAllocator().allocate(size);
-}
-
-inline void Deallocate(Allocator *allocator, uint8_t *p, size_t size) {
-  if (allocator)
-    allocator->deallocate(p, size);
-  else
-    DefaultAllocator().deallocate(p, size);
-}
-
-inline uint8_t *ReallocateDownward(Allocator *allocator, uint8_t *old_p,
-                                   size_t old_size, size_t new_size,
-                                   size_t in_use_back, size_t in_use_front) {
-  return allocator ? allocator->reallocate_downward(old_p, old_size, new_size,
-                                                    in_use_back, in_use_front)
-                   : DefaultAllocator().reallocate_downward(
-                         old_p, old_size, new_size, in_use_back, in_use_front);
-}
-
-// DetachedBuffer is a finished flatbuffer memory region, detached from its
-// builder. The original memory region and allocator are also stored so that
-// the DetachedBuffer can manage the memory lifetime.
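// Sketch of a user-supplied Allocator, assuming only the two pure-virtual
// methods need overriding (reallocate_downward() above has a usable default).
// The byte counter is illustrative; FlatBufferBuilder is declared further
// below in this header.
#include <cstddef>
#include <cstdint>
#include "flatbuffers/flatbuffers.h"

class CountingAllocator : public flatbuffers::Allocator {
 public:
  uint8_t *allocate(size_t size) override {
    allocated_ += size;
    return new uint8_t[size];
  }
  void deallocate(uint8_t *p, size_t) override { delete[] p; }
  size_t allocated_ = 0;
};

inline void BuildWithCountingAllocator() {
  CountingAllocator alloc;
  // own_allocator stays false, so `alloc` must outlive the builder.
  flatbuffers::FlatBufferBuilder fbb(1024, &alloc, /*own_allocator=*/false);
  fbb.CreateString("hello");  // triggers the first allocation
}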
-class DetachedBuffer { - public: - DetachedBuffer() - : allocator_(nullptr), - own_allocator_(false), - buf_(nullptr), - reserved_(0), - cur_(nullptr), - size_(0) {} - - DetachedBuffer(Allocator *allocator, bool own_allocator, uint8_t *buf, - size_t reserved, uint8_t *cur, size_t sz) - : allocator_(allocator), - own_allocator_(own_allocator), - buf_(buf), - reserved_(reserved), - cur_(cur), - size_(sz) {} - - // clang-format off - #if !defined(FLATBUFFERS_CPP98_STL) - // clang-format on - DetachedBuffer(DetachedBuffer &&other) - : allocator_(other.allocator_), - own_allocator_(other.own_allocator_), - buf_(other.buf_), - reserved_(other.reserved_), - cur_(other.cur_), - size_(other.size_) { - other.reset(); - } - // clang-format off - #endif // !defined(FLATBUFFERS_CPP98_STL) - // clang-format on - - // clang-format off - #if !defined(FLATBUFFERS_CPP98_STL) - // clang-format on - DetachedBuffer &operator=(DetachedBuffer &&other) { - if (this == &other) return *this; - - destroy(); - - allocator_ = other.allocator_; - own_allocator_ = other.own_allocator_; - buf_ = other.buf_; - reserved_ = other.reserved_; - cur_ = other.cur_; - size_ = other.size_; - - other.reset(); - - return *this; - } - // clang-format off - #endif // !defined(FLATBUFFERS_CPP98_STL) - // clang-format on - - ~DetachedBuffer() { destroy(); } - - const uint8_t *data() const { return cur_; } - - uint8_t *data() { return cur_; } - - size_t size() const { return size_; } - - // clang-format off - #if 0 // disabled for now due to the ordering of classes in this header - template - bool Verify() const { - Verifier verifier(data(), size()); - return verifier.Verify(nullptr); - } - - template - const T* GetRoot() const { - return flatbuffers::GetRoot(data()); - } - - template - T* GetRoot() { - return flatbuffers::GetRoot(data()); - } - #endif - // clang-format on - - // clang-format off - #if !defined(FLATBUFFERS_CPP98_STL) - // clang-format on - // These may change access mode, leave these at end of public section - FLATBUFFERS_DELETE_FUNC(DetachedBuffer(const DetachedBuffer &other)) - FLATBUFFERS_DELETE_FUNC( - DetachedBuffer &operator=(const DetachedBuffer &other)) - // clang-format off - #endif // !defined(FLATBUFFERS_CPP98_STL) - // clang-format on - - protected: - Allocator *allocator_; - bool own_allocator_; - uint8_t *buf_; - size_t reserved_; - uint8_t *cur_; - size_t size_; - - inline void destroy() { - if (buf_) Deallocate(allocator_, buf_, reserved_); - if (own_allocator_ && allocator_) { delete allocator_; } - reset(); - } - - inline void reset() { - allocator_ = nullptr; - own_allocator_ = false; - buf_ = nullptr; - reserved_ = 0; - cur_ = nullptr; - size_ = 0; - } -}; - -// This is a minimal replication of std::vector functionality, -// except growing from higher to lower addresses. i.e push_back() inserts data -// in the lowest address in the vector. -// Since this vector leaves the lower part unused, we support a "scratch-pad" -// that can be stored there for temporary data, to share the allocated space. -// Essentially, this supports 2 std::vectors in a single buffer. 
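// Sketch of keeping a finished buffer alive after its FlatBufferBuilder is
// gone, via the DetachedBuffer defined above. `MyGame::CreateMonster` is a
// stand-in for a schema-generated helper.
#include "flatbuffers/flatbuffers.h"

inline flatbuffers::DetachedBuffer BuildMonster() {
  flatbuffers::FlatBufferBuilder fbb;
  auto name = fbb.CreateString("orc");
  fbb.Finish(MyGame::CreateMonster(fbb, name));
  // Release() moves the memory (and, if owned, the allocator) out of the
  // builder; the builder must not be used for this buffer afterwards.
  return fbb.Release();
}

inline size_t UseDetached() {
  flatbuffers::DetachedBuffer buf = BuildMonster();  // move-constructed
  // buf.data() points at the finished FlatBuffer, buf.size() is its length.
  return buf.size();
}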
-class vector_downward { - public: - explicit vector_downward(size_t initial_size, Allocator *allocator, - bool own_allocator, size_t buffer_minalign) - : allocator_(allocator), - own_allocator_(own_allocator), - initial_size_(initial_size), - buffer_minalign_(buffer_minalign), - reserved_(0), - buf_(nullptr), - cur_(nullptr), - scratch_(nullptr) {} - - // clang-format off - #if !defined(FLATBUFFERS_CPP98_STL) - vector_downward(vector_downward &&other) - #else - vector_downward(vector_downward &other) - #endif // defined(FLATBUFFERS_CPP98_STL) - // clang-format on - : allocator_(other.allocator_), - own_allocator_(other.own_allocator_), - initial_size_(other.initial_size_), - buffer_minalign_(other.buffer_minalign_), - reserved_(other.reserved_), - buf_(other.buf_), - cur_(other.cur_), - scratch_(other.scratch_) { - // No change in other.allocator_ - // No change in other.initial_size_ - // No change in other.buffer_minalign_ - other.own_allocator_ = false; - other.reserved_ = 0; - other.buf_ = nullptr; - other.cur_ = nullptr; - other.scratch_ = nullptr; - } - - // clang-format off - #if !defined(FLATBUFFERS_CPP98_STL) - // clang-format on - vector_downward &operator=(vector_downward &&other) { - // Move construct a temporary and swap idiom - vector_downward temp(std::move(other)); - swap(temp); - return *this; - } - // clang-format off - #endif // defined(FLATBUFFERS_CPP98_STL) - // clang-format on - - ~vector_downward() { - clear_buffer(); - clear_allocator(); - } - - void reset() { - clear_buffer(); - clear(); - } - - void clear() { - if (buf_) { - cur_ = buf_ + reserved_; - } else { - reserved_ = 0; - cur_ = nullptr; - } - clear_scratch(); - } - - void clear_scratch() { scratch_ = buf_; } - - void clear_allocator() { - if (own_allocator_ && allocator_) { delete allocator_; } - allocator_ = nullptr; - own_allocator_ = false; - } - - void clear_buffer() { - if (buf_) Deallocate(allocator_, buf_, reserved_); - buf_ = nullptr; - } - - // Relinquish the pointer to the caller. - uint8_t *release_raw(size_t &allocated_bytes, size_t &offset) { - auto *buf = buf_; - allocated_bytes = reserved_; - offset = static_cast(cur_ - buf_); - - // release_raw only relinquishes the buffer ownership. - // Does not deallocate or reset the allocator. Destructor will do that. - buf_ = nullptr; - clear(); - return buf; - } - - // Relinquish the pointer to the caller. - DetachedBuffer release() { - // allocator ownership (if any) is transferred to DetachedBuffer. - DetachedBuffer fb(allocator_, own_allocator_, buf_, reserved_, cur_, - size()); - if (own_allocator_) { - allocator_ = nullptr; - own_allocator_ = false; - } - buf_ = nullptr; - clear(); - return fb; - } - - size_t ensure_space(size_t len) { - FLATBUFFERS_ASSERT(cur_ >= scratch_ && scratch_ >= buf_); - if (len > static_cast(cur_ - scratch_)) { reallocate(len); } - // Beyond this, signed offsets may not have enough range: - // (FlatBuffers > 2GB not supported). - FLATBUFFERS_ASSERT(size() < FLATBUFFERS_MAX_BUFFER_SIZE); - return len; - } - - inline uint8_t *make_space(size_t len) { - size_t space = ensure_space(len); - cur_ -= space; - return cur_; - } - - // Returns nullptr if using the DefaultAllocator. 
- Allocator *get_custom_allocator() { return allocator_; } - - uoffset_t size() const { - return static_cast(reserved_ - (cur_ - buf_)); - } - - uoffset_t scratch_size() const { - return static_cast(scratch_ - buf_); - } - - size_t capacity() const { return reserved_; } - - uint8_t *data() const { - FLATBUFFERS_ASSERT(cur_); - return cur_; - } - - uint8_t *scratch_data() const { - FLATBUFFERS_ASSERT(buf_); - return buf_; - } - - uint8_t *scratch_end() const { - FLATBUFFERS_ASSERT(scratch_); - return scratch_; - } - - uint8_t *data_at(size_t offset) const { return buf_ + reserved_ - offset; } - - void push(const uint8_t *bytes, size_t num) { - if (num > 0) { memcpy(make_space(num), bytes, num); } - } - - // Specialized version of push() that avoids memcpy call for small data. - template void push_small(const T &little_endian_t) { - make_space(sizeof(T)); - *reinterpret_cast(cur_) = little_endian_t; - } - - template void scratch_push_small(const T &t) { - ensure_space(sizeof(T)); - *reinterpret_cast(scratch_) = t; - scratch_ += sizeof(T); - } - - // fill() is most frequently called with small byte counts (<= 4), - // which is why we're using loops rather than calling memset. - void fill(size_t zero_pad_bytes) { - make_space(zero_pad_bytes); - for (size_t i = 0; i < zero_pad_bytes; i++) cur_[i] = 0; - } - - // Version for when we know the size is larger. - // Precondition: zero_pad_bytes > 0 - void fill_big(size_t zero_pad_bytes) { - memset(make_space(zero_pad_bytes), 0, zero_pad_bytes); - } - - void pop(size_t bytes_to_remove) { cur_ += bytes_to_remove; } - void scratch_pop(size_t bytes_to_remove) { scratch_ -= bytes_to_remove; } - - void swap(vector_downward &other) { - using std::swap; - swap(allocator_, other.allocator_); - swap(own_allocator_, other.own_allocator_); - swap(initial_size_, other.initial_size_); - swap(buffer_minalign_, other.buffer_minalign_); - swap(reserved_, other.reserved_); - swap(buf_, other.buf_); - swap(cur_, other.cur_); - swap(scratch_, other.scratch_); - } - - void swap_allocator(vector_downward &other) { - using std::swap; - swap(allocator_, other.allocator_); - swap(own_allocator_, other.own_allocator_); - } - - private: - // You shouldn't really be copying instances of this class. - FLATBUFFERS_DELETE_FUNC(vector_downward(const vector_downward &)) - FLATBUFFERS_DELETE_FUNC(vector_downward &operator=(const vector_downward &)) - - Allocator *allocator_; - bool own_allocator_; - size_t initial_size_; - size_t buffer_minalign_; - size_t reserved_; - uint8_t *buf_; - uint8_t *cur_; // Points at location between empty (below) and used (above). - uint8_t *scratch_; // Points to the end of the scratchpad in use. - - void reallocate(size_t len) { - auto old_reserved = reserved_; - auto old_size = size(); - auto old_scratch_size = scratch_size(); - reserved_ += - (std::max)(len, old_reserved ? old_reserved / 2 : initial_size_); - reserved_ = (reserved_ + buffer_minalign_ - 1) & ~(buffer_minalign_ - 1); - if (buf_) { - buf_ = ReallocateDownward(allocator_, buf_, old_reserved, reserved_, - old_size, old_scratch_size); - } else { - buf_ = Allocate(allocator_, reserved_); - } - cur_ = buf_ + reserved_ - old_size; - scratch_ = buf_ + old_scratch_size; - } -}; - -// Converts a Field ID to a virtual table offset. -inline voffset_t FieldIndexToOffset(voffset_t field_id) { - // Should correspond to what EndTable() below builds up. - const int fixed_fields = 2; // Vtable size and Object Size. 
- return static_cast((field_id + fixed_fields) * sizeof(voffset_t)); -} - -template -const T *data(const std::vector &v) { - // Eventually the returned pointer gets passed down to memcpy, so - // we need it to be non-null to avoid undefined behavior. - static uint8_t t; - return v.empty() ? reinterpret_cast(&t) : &v.front(); -} -template T *data(std::vector &v) { - // Eventually the returned pointer gets passed down to memcpy, so - // we need it to be non-null to avoid undefined behavior. - static uint8_t t; - return v.empty() ? reinterpret_cast(&t) : &v.front(); -} - -/// @endcond - -/// @addtogroup flatbuffers_cpp_api -/// @{ -/// @class FlatBufferBuilder -/// @brief Helper class to hold data needed in creation of a FlatBuffer. -/// To serialize data, you typically call one of the `Create*()` functions in -/// the generated code, which in turn call a sequence of `StartTable`/ -/// `PushElement`/`AddElement`/`EndTable`, or the builtin `CreateString`/ -/// `CreateVector` functions. Do this is depth-first order to build up a tree to -/// the root. `Finish()` wraps up the buffer ready for transport. -class FlatBufferBuilder { - public: - /// @brief Default constructor for FlatBufferBuilder. - /// @param[in] initial_size The initial size of the buffer, in bytes. Defaults - /// to `1024`. - /// @param[in] allocator An `Allocator` to use. If null will use - /// `DefaultAllocator`. - /// @param[in] own_allocator Whether the builder/vector should own the - /// allocator. Defaults to / `false`. - /// @param[in] buffer_minalign Force the buffer to be aligned to the given - /// minimum alignment upon reallocation. Only needed if you intend to store - /// types with custom alignment AND you wish to read the buffer in-place - /// directly after creation. - explicit FlatBufferBuilder( - size_t initial_size = 1024, Allocator *allocator = nullptr, - bool own_allocator = false, - size_t buffer_minalign = AlignOf()) - : buf_(initial_size, allocator, own_allocator, buffer_minalign), - num_field_loc(0), - max_voffset_(0), - nested(false), - finished(false), - minalign_(1), - force_defaults_(false), - dedup_vtables_(true), - string_pool(nullptr) { - EndianCheck(); - } - - // clang-format off - /// @brief Move constructor for FlatBufferBuilder. - #if !defined(FLATBUFFERS_CPP98_STL) - FlatBufferBuilder(FlatBufferBuilder &&other) - #else - FlatBufferBuilder(FlatBufferBuilder &other) - #endif // #if !defined(FLATBUFFERS_CPP98_STL) - : buf_(1024, nullptr, false, AlignOf()), - num_field_loc(0), - max_voffset_(0), - nested(false), - finished(false), - minalign_(1), - force_defaults_(false), - dedup_vtables_(true), - string_pool(nullptr) { - EndianCheck(); - // Default construct and swap idiom. - // Lack of delegating constructors in vs2010 makes it more verbose than needed. - Swap(other); - } - // clang-format on - - // clang-format off - #if !defined(FLATBUFFERS_CPP98_STL) - // clang-format on - /// @brief Move assignment operator for FlatBufferBuilder. 
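// End-to-end sketch of the workflow described in the FlatBufferBuilder class
// comment above: build leaf objects first (depth-first), then the root table,
// then Finish(). `MyGame::CreateMonster` and its parameters are hypothetical
// generated code.
#include <cstdint>
#include <vector>
#include "flatbuffers/flatbuffers.h"

inline void BuildAndInspect() {
  flatbuffers::FlatBufferBuilder fbb(1024);
  auto name = fbb.CreateString("orc");                             // leaves first
  auto inventory = fbb.CreateVector(std::vector<uint8_t>{1, 2, 3});
  auto monster = MyGame::CreateMonster(fbb, name, inventory, /*hp=*/80);
  fbb.Finish(monster);                         // writes the root offset
  const uint8_t *buf = fbb.GetBufferPointer(); // only valid after Finish()
  flatbuffers::uoffset_t len = fbb.GetSize();
  (void)buf;
  (void)len;
}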
- FlatBufferBuilder &operator=(FlatBufferBuilder &&other) { - // Move construct a temporary and swap idiom - FlatBufferBuilder temp(std::move(other)); - Swap(temp); - return *this; - } - // clang-format off - #endif // defined(FLATBUFFERS_CPP98_STL) - // clang-format on - - void Swap(FlatBufferBuilder &other) { - using std::swap; - buf_.swap(other.buf_); - swap(num_field_loc, other.num_field_loc); - swap(max_voffset_, other.max_voffset_); - swap(nested, other.nested); - swap(finished, other.finished); - swap(minalign_, other.minalign_); - swap(force_defaults_, other.force_defaults_); - swap(dedup_vtables_, other.dedup_vtables_); - swap(string_pool, other.string_pool); - } - - ~FlatBufferBuilder() { - if (string_pool) delete string_pool; - } - - void Reset() { - Clear(); // clear builder state - buf_.reset(); // deallocate buffer - } - - /// @brief Reset all the state in this FlatBufferBuilder so it can be reused - /// to construct another buffer. - void Clear() { - ClearOffsets(); - buf_.clear(); - nested = false; - finished = false; - minalign_ = 1; - if (string_pool) string_pool->clear(); - } - - /// @brief The current size of the serialized buffer, counting from the end. - /// @return Returns an `uoffset_t` with the current size of the buffer. - uoffset_t GetSize() const { return buf_.size(); } - - /// @brief Get the serialized buffer (after you call `Finish()`). - /// @return Returns an `uint8_t` pointer to the FlatBuffer data inside the - /// buffer. - uint8_t *GetBufferPointer() const { - Finished(); - return buf_.data(); - } - - /// @brief Get a pointer to an unfinished buffer. - /// @return Returns a `uint8_t` pointer to the unfinished buffer. - uint8_t *GetCurrentBufferPointer() const { return buf_.data(); } - - /// @brief Get the released pointer to the serialized buffer. - /// @warning Do NOT attempt to use this FlatBufferBuilder afterwards! - /// @return A `FlatBuffer` that owns the buffer and its allocator and - /// behaves similar to a `unique_ptr` with a deleter. - FLATBUFFERS_ATTRIBUTE(deprecated("use Release() instead")) - DetachedBuffer ReleaseBufferPointer() { - Finished(); - return buf_.release(); - } - - /// @brief Get the released DetachedBuffer. - /// @return A `DetachedBuffer` that owns the buffer and its allocator. - DetachedBuffer Release() { - Finished(); - return buf_.release(); - } - - /// @brief Get the released pointer to the serialized buffer. - /// @param size The size of the memory block containing - /// the serialized `FlatBuffer`. - /// @param offset The offset from the released pointer where the finished - /// `FlatBuffer` starts. - /// @return A raw pointer to the start of the memory block containing - /// the serialized `FlatBuffer`. - /// @remark If the allocator is owned, it gets deleted when the destructor is - /// called.. - uint8_t *ReleaseRaw(size_t &size, size_t &offset) { - Finished(); - return buf_.release_raw(size, offset); - } - - /// @brief get the minimum alignment this buffer needs to be accessed - /// properly. This is only known once all elements have been written (after - /// you call Finish()). You can use this information if you need to embed - /// a FlatBuffer in some other buffer, such that you can later read it - /// without first having to copy it into its own buffer. - size_t GetBufferMinAlignment() { - Finished(); - return minalign_; - } - - /// @cond FLATBUFFERS_INTERNAL - void Finished() const { - // If you get this CHECK, you're attempting to get access a buffer - // which hasn't been finished yet. 
Be sure to call - // FlatBufferBuilder::Finish with your root table. - // If you really need to access an unfinished buffer, call - // GetCurrentBufferPointer instead. - FLATBUFFERS_ASSERT(finished); - } - /// @endcond - - /// @brief In order to save space, fields that are set to their default value - /// don't get serialized into the buffer. - /// @param[in] fd When set to `true`, always serializes default values that - /// are set. Optional fields which are not set explicitly, will still not be - /// serialized. - void ForceDefaults(bool fd) { force_defaults_ = fd; } - - /// @brief By default vtables are deduped in order to save space. - /// @param[in] dedup When set to `true`, dedup vtables. - void DedupVtables(bool dedup) { dedup_vtables_ = dedup; } - - /// @cond FLATBUFFERS_INTERNAL - void Pad(size_t num_bytes) { buf_.fill(num_bytes); } - - void TrackMinAlign(size_t elem_size) { - if (elem_size > minalign_) minalign_ = elem_size; - } - - void Align(size_t elem_size) { - TrackMinAlign(elem_size); - buf_.fill(PaddingBytes(buf_.size(), elem_size)); - } - - void PushFlatBuffer(const uint8_t *bytes, size_t size) { - PushBytes(bytes, size); - finished = true; - } - - void PushBytes(const uint8_t *bytes, size_t size) { buf_.push(bytes, size); } - - void PopBytes(size_t amount) { buf_.pop(amount); } - - template void AssertScalarT() { - // The code assumes power of 2 sizes and endian-swap-ability. - static_assert(flatbuffers::is_scalar::value, "T must be a scalar type"); - } - - // Write a single aligned scalar to the buffer - template uoffset_t PushElement(T element) { - AssertScalarT(); - T litle_endian_element = EndianScalar(element); - Align(sizeof(T)); - buf_.push_small(litle_endian_element); - return GetSize(); - } - - template uoffset_t PushElement(Offset off) { - // Special case for offsets: see ReferTo below. - return PushElement(ReferTo(off.o)); - } - - // When writing fields, we track where they are, so we can create correct - // vtables later. - void TrackField(voffset_t field, uoffset_t off) { - FieldLoc fl = { off, field }; - buf_.scratch_push_small(fl); - num_field_loc++; - max_voffset_ = (std::max)(max_voffset_, field); - } - - // Like PushElement, but additionally tracks the field this represents. - template void AddElement(voffset_t field, T e, T def) { - // We don't serialize values equal to the default. - if (IsTheSameAs(e, def) && !force_defaults_) return; - auto off = PushElement(e); - TrackField(field, off); - } - - template void AddOffset(voffset_t field, Offset off) { - if (off.IsNull()) return; // Don't store. - AddElement(field, ReferTo(off.o), static_cast(0)); - } - - template void AddStruct(voffset_t field, const T *structptr) { - if (!structptr) return; // Default, don't store. - Align(AlignOf()); - buf_.push_small(*structptr); - TrackField(field, GetSize()); - } - - void AddStructOffset(voffset_t field, uoffset_t off) { - TrackField(field, off); - } - - // Offsets initially are relative to the end of the buffer (downwards). - // This function converts them to be relative to the current location - // in the buffer (when stored here), pointing upwards. - uoffset_t ReferTo(uoffset_t off) { - // Align to ensure GetSize() below is correct. - Align(sizeof(uoffset_t)); - // Offset must refer to something already in buffer. 
- FLATBUFFERS_ASSERT(off && off <= GetSize()); - return GetSize() - off + static_cast(sizeof(uoffset_t)); - } - - void NotNested() { - // If you hit this, you're trying to construct a Table/Vector/String - // during the construction of its parent table (between the MyTableBuilder - // and table.Finish(). - // Move the creation of these sub-objects to above the MyTableBuilder to - // not get this CHECK. - // Ignoring this CHECK may appear to work in simple cases, but the reason - // it is here is that storing objects in-line may cause vtable offsets - // to not fit anymore. It also leads to vtable duplication. - FLATBUFFERS_ASSERT(!nested); - // If you hit this, fields were added outside the scope of a table. - FLATBUFFERS_ASSERT(!num_field_loc); - } - - // From generated code (or from the parser), we call StartTable/EndTable - // with a sequence of AddElement calls in between. - uoffset_t StartTable() { - NotNested(); - nested = true; - return GetSize(); - } - - // This finishes one serialized object by generating the vtable if it's a - // table, comparing it against existing vtables, and writing the - // resulting vtable offset. - uoffset_t EndTable(uoffset_t start) { - // If you get this CHECK, a corresponding StartTable wasn't called. - FLATBUFFERS_ASSERT(nested); - // Write the vtable offset, which is the start of any Table. - // We fill it's value later. - auto vtableoffsetloc = PushElement(0); - // Write a vtable, which consists entirely of voffset_t elements. - // It starts with the number of offsets, followed by a type id, followed - // by the offsets themselves. In reverse: - // Include space for the last offset and ensure empty tables have a - // minimum size. - max_voffset_ = - (std::max)(static_cast(max_voffset_ + sizeof(voffset_t)), - FieldIndexToOffset(0)); - buf_.fill_big(max_voffset_); - auto table_object_size = vtableoffsetloc - start; - // Vtable use 16bit offsets. - FLATBUFFERS_ASSERT(table_object_size < 0x10000); - WriteScalar(buf_.data() + sizeof(voffset_t), - static_cast(table_object_size)); - WriteScalar(buf_.data(), max_voffset_); - // Write the offsets into the table - for (auto it = buf_.scratch_end() - num_field_loc * sizeof(FieldLoc); - it < buf_.scratch_end(); it += sizeof(FieldLoc)) { - auto field_location = reinterpret_cast(it); - auto pos = static_cast(vtableoffsetloc - field_location->off); - // If this asserts, it means you've set a field twice. - FLATBUFFERS_ASSERT( - !ReadScalar(buf_.data() + field_location->id)); - WriteScalar(buf_.data() + field_location->id, pos); - } - ClearOffsets(); - auto vt1 = reinterpret_cast(buf_.data()); - auto vt1_size = ReadScalar(vt1); - auto vt_use = GetSize(); - // See if we already have generated a vtable with this exact same - // layout before. If so, make it point to the old one, remove this one. - if (dedup_vtables_) { - for (auto it = buf_.scratch_data(); it < buf_.scratch_end(); - it += sizeof(uoffset_t)) { - auto vt_offset_ptr = reinterpret_cast(it); - auto vt2 = reinterpret_cast(buf_.data_at(*vt_offset_ptr)); - auto vt2_size = ReadScalar(vt2); - if (vt1_size != vt2_size || 0 != memcmp(vt2, vt1, vt1_size)) continue; - vt_use = *vt_offset_ptr; - buf_.pop(GetSize() - vtableoffsetloc); - break; - } - } - // If this is a new vtable, remember it. - if (vt_use == GetSize()) { buf_.scratch_push_small(vt_use); } - // Fill the vtable offset we created above. - // The offset points from the beginning of the object to where the - // vtable is stored. 
- // Offsets default direction is downward in memory for future format - // flexibility (storing all vtables at the start of the file). - WriteScalar(buf_.data_at(vtableoffsetloc), - static_cast(vt_use) - - static_cast(vtableoffsetloc)); - - nested = false; - return vtableoffsetloc; - } - - FLATBUFFERS_ATTRIBUTE(deprecated("call the version above instead")) - uoffset_t EndTable(uoffset_t start, voffset_t /*numfields*/) { - return EndTable(start); - } - - // This checks a required field has been set in a given table that has - // just been constructed. - template void Required(Offset table, voffset_t field); - - uoffset_t StartStruct(size_t alignment) { - Align(alignment); - return GetSize(); - } - - uoffset_t EndStruct() { return GetSize(); } - - void ClearOffsets() { - buf_.scratch_pop(num_field_loc * sizeof(FieldLoc)); - num_field_loc = 0; - max_voffset_ = 0; - } - - // Aligns such that when "len" bytes are written, an object can be written - // after it with "alignment" without padding. - void PreAlign(size_t len, size_t alignment) { - TrackMinAlign(alignment); - buf_.fill(PaddingBytes(GetSize() + len, alignment)); - } - template void PreAlign(size_t len) { - AssertScalarT(); - PreAlign(len, sizeof(T)); - } - /// @endcond - - /// @brief Store a string in the buffer, which can contain any binary data. - /// @param[in] str A const char pointer to the data to be stored as a string. - /// @param[in] len The number of bytes that should be stored from `str`. - /// @return Returns the offset in the buffer where the string starts. - Offset CreateString(const char *str, size_t len) { - NotNested(); - PreAlign(len + 1); // Always 0-terminated. - buf_.fill(1); - PushBytes(reinterpret_cast(str), len); - PushElement(static_cast(len)); - return Offset(GetSize()); - } - - /// @brief Store a string in the buffer, which is null-terminated. - /// @param[in] str A const char pointer to a C-string to add to the buffer. - /// @return Returns the offset in the buffer where the string starts. - Offset CreateString(const char *str) { - return CreateString(str, strlen(str)); - } - - /// @brief Store a string in the buffer, which is null-terminated. - /// @param[in] str A char pointer to a C-string to add to the buffer. - /// @return Returns the offset in the buffer where the string starts. - Offset CreateString(char *str) { - return CreateString(str, strlen(str)); - } - - /// @brief Store a string in the buffer, which can contain any binary data. - /// @param[in] str A const reference to a std::string to store in the buffer. - /// @return Returns the offset in the buffer where the string starts. - Offset CreateString(const std::string &str) { - return CreateString(str.c_str(), str.length()); - } - - // clang-format off - #ifdef FLATBUFFERS_HAS_STRING_VIEW - /// @brief Store a string in the buffer, which can contain any binary data. - /// @param[in] str A const string_view to copy in to the buffer. - /// @return Returns the offset in the buffer where the string starts. - Offset CreateString(flatbuffers::string_view str) { - return CreateString(str.data(), str.size()); - } - #endif // FLATBUFFERS_HAS_STRING_VIEW - // clang-format on - - /// @brief Store a string in the buffer, which can contain any binary data. - /// @param[in] str A const pointer to a `String` struct to add to the buffer. - /// @return Returns the offset in the buffer where the string starts - Offset CreateString(const String *str) { - return str ? 
CreateString(str->c_str(), str->size()) : 0; - } - - /// @brief Store a string in the buffer, which can contain any binary data. - /// @param[in] str A const reference to a std::string like type with support - /// of T::c_str() and T::length() to store in the buffer. - /// @return Returns the offset in the buffer where the string starts. - template Offset CreateString(const T &str) { - return CreateString(str.c_str(), str.length()); - } - - /// @brief Store a string in the buffer, which can contain any binary data. - /// If a string with this exact contents has already been serialized before, - /// instead simply returns the offset of the existing string. - /// @param[in] str A const char pointer to the data to be stored as a string. - /// @param[in] len The number of bytes that should be stored from `str`. - /// @return Returns the offset in the buffer where the string starts. - Offset CreateSharedString(const char *str, size_t len) { - if (!string_pool) - string_pool = new StringOffsetMap(StringOffsetCompare(buf_)); - auto size_before_string = buf_.size(); - // Must first serialize the string, since the set is all offsets into - // buffer. - auto off = CreateString(str, len); - auto it = string_pool->find(off); - // If it exists we reuse existing serialized data! - if (it != string_pool->end()) { - // We can remove the string we serialized. - buf_.pop(buf_.size() - size_before_string); - return *it; - } - // Record this string for future use. - string_pool->insert(off); - return off; - } - - /// @brief Store a string in the buffer, which null-terminated. - /// If a string with this exact contents has already been serialized before, - /// instead simply returns the offset of the existing string. - /// @param[in] str A const char pointer to a C-string to add to the buffer. - /// @return Returns the offset in the buffer where the string starts. - Offset CreateSharedString(const char *str) { - return CreateSharedString(str, strlen(str)); - } - - /// @brief Store a string in the buffer, which can contain any binary data. - /// If a string with this exact contents has already been serialized before, - /// instead simply returns the offset of the existing string. - /// @param[in] str A const reference to a std::string to store in the buffer. - /// @return Returns the offset in the buffer where the string starts. - Offset CreateSharedString(const std::string &str) { - return CreateSharedString(str.c_str(), str.length()); - } - - /// @brief Store a string in the buffer, which can contain any binary data. - /// If a string with this exact contents has already been serialized before, - /// instead simply returns the offset of the existing string. - /// @param[in] str A const pointer to a `String` struct to add to the buffer. - /// @return Returns the offset in the buffer where the string starts - Offset CreateSharedString(const String *str) { - return CreateSharedString(str->c_str(), str->size()); - } - - /// @cond FLATBUFFERS_INTERNAL - uoffset_t EndVector(size_t len) { - FLATBUFFERS_ASSERT(nested); // Hit if no corresponding StartVector. - nested = false; - return PushElement(static_cast(len)); - } - - void StartVector(size_t len, size_t elemsize) { - NotNested(); - nested = true; - PreAlign(len * elemsize); - PreAlign(len * elemsize, elemsize); // Just in case elemsize > uoffset_t. - } - - // Call this right before StartVector/CreateVector if you want to force the - // alignment to be something different than what the element size would - // normally dictate. 
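// Sketch of string deduplication with CreateSharedString() above: identical
// contents are serialized once and the same offset is handed back again.
#include "flatbuffers/flatbuffers.h"

inline void SharedStrings(flatbuffers::FlatBufferBuilder &fbb) {
  auto a = fbb.CreateSharedString("common");
  auto b = fbb.CreateSharedString("common");  // found in the pool, no new bytes
  FLATBUFFERS_ASSERT(a.o == b.o);
  auto c = fbb.CreateString("common");        // plain CreateString always writes
  FLATBUFFERS_ASSERT(c.o != a.o);
}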
- // This is useful when storing a nested_flatbuffer in a vector of bytes, - // or when storing SIMD floats, etc. - void ForceVectorAlignment(size_t len, size_t elemsize, size_t alignment) { - PreAlign(len * elemsize, alignment); - } - - // Similar to ForceVectorAlignment but for String fields. - void ForceStringAlignment(size_t len, size_t alignment) { - PreAlign((len + 1) * sizeof(char), alignment); - } - - /// @endcond - - /// @brief Serialize an array into a FlatBuffer `vector`. - /// @tparam T The data type of the array elements. - /// @param[in] v A pointer to the array of type `T` to serialize into the - /// buffer as a `vector`. - /// @param[in] len The number of elements to serialize. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template Offset> CreateVector(const T *v, size_t len) { - // If this CHECK hits, you're specifying a template argument that is - // causing the wrong overload to be selected, remove it. - AssertScalarT(); - StartVector(len, sizeof(T)); - // clang-format off - #if FLATBUFFERS_LITTLEENDIAN - PushBytes(reinterpret_cast(v), len * sizeof(T)); - #else - if (sizeof(T) == 1) { - PushBytes(reinterpret_cast(v), len); - } else { - for (auto i = len; i > 0; ) { - PushElement(v[--i]); - } - } - #endif - // clang-format on - return Offset>(EndVector(len)); - } - - template - Offset>> CreateVector(const Offset *v, size_t len) { - StartVector(len, sizeof(Offset)); - for (auto i = len; i > 0;) { PushElement(v[--i]); } - return Offset>>(EndVector(len)); - } - - /// @brief Serialize a `std::vector` into a FlatBuffer `vector`. - /// @tparam T The data type of the `std::vector` elements. - /// @param v A const reference to the `std::vector` to serialize into the - /// buffer as a `vector`. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template Offset> CreateVector(const std::vector &v) { - return CreateVector(data(v), v.size()); - } - - // vector may be implemented using a bit-set, so we can't access it as - // an array. Instead, read elements manually. - // Background: https://isocpp.org/blog/2012/11/on-vectorbool - Offset> CreateVector(const std::vector &v) { - StartVector(v.size(), sizeof(uint8_t)); - for (auto i = v.size(); i > 0;) { - PushElement(static_cast(v[--i])); - } - return Offset>(EndVector(v.size())); - } - - // clang-format off - #ifndef FLATBUFFERS_CPP98_STL - /// @brief Serialize values returned by a function into a FlatBuffer `vector`. - /// This is a convenience function that takes care of iteration for you. - /// @tparam T The data type of the `std::vector` elements. - /// @param f A function that takes the current iteration 0..vector_size-1 and - /// returns any type that you can construct a FlatBuffers vector out of. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template Offset> CreateVector(size_t vector_size, - const std::function &f) { - std::vector elems(vector_size); - for (size_t i = 0; i < vector_size; i++) elems[i] = f(i); - return CreateVector(elems); - } - #endif - // clang-format on - - /// @brief Serialize values returned by a function into a FlatBuffer `vector`. - /// This is a convenience function that takes care of iteration for you. - /// @tparam T The data type of the `std::vector` elements. 
- /// @param f A function that takes the current iteration 0..vector_size-1, - /// and the state parameter returning any type that you can construct a - /// FlatBuffers vector out of. - /// @param state State passed to f. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template - Offset> CreateVector(size_t vector_size, F f, S *state) { - std::vector elems(vector_size); - for (size_t i = 0; i < vector_size; i++) elems[i] = f(i, state); - return CreateVector(elems); - } - - /// @brief Serialize a `std::vector` into a FlatBuffer `vector`. - /// This is a convenience function for a common case. - /// @param v A const reference to the `std::vector` to serialize into the - /// buffer as a `vector`. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - Offset>> CreateVectorOfStrings( - const std::vector &v) { - std::vector> offsets(v.size()); - for (size_t i = 0; i < v.size(); i++) offsets[i] = CreateString(v[i]); - return CreateVector(offsets); - } - - /// @brief Serialize an array of structs into a FlatBuffer `vector`. - /// @tparam T The data type of the struct array elements. - /// @param[in] v A pointer to the array of type `T` to serialize into the - /// buffer as a `vector`. - /// @param[in] len The number of elements to serialize. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template - Offset> CreateVectorOfStructs(const T *v, size_t len) { - StartVector(len * sizeof(T) / AlignOf(), AlignOf()); - PushBytes(reinterpret_cast(v), sizeof(T) * len); - return Offset>(EndVector(len)); - } - - /// @brief Serialize an array of native structs into a FlatBuffer `vector`. - /// @tparam T The data type of the struct array elements. - /// @tparam S The data type of the native struct array elements. - /// @param[in] v A pointer to the array of type `S` to serialize into the - /// buffer as a `vector`. - /// @param[in] len The number of elements to serialize. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template - Offset> CreateVectorOfNativeStructs(const S *v, - size_t len) { - extern T Pack(const S &); - std::vector vv(len); - std::transform(v, v + len, vv.begin(), Pack); - return CreateVectorOfStructs(data(vv), vv.size()); - } - - // clang-format off - #ifndef FLATBUFFERS_CPP98_STL - /// @brief Serialize an array of structs into a FlatBuffer `vector`. - /// @tparam T The data type of the struct array elements. - /// @param[in] filler A function that takes the current iteration 0..vector_size-1 - /// and a pointer to the struct that must be filled. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - /// This is mostly useful when flatbuffers are generated with mutation - /// accessors. - template Offset> CreateVectorOfStructs( - size_t vector_size, const std::function &filler) { - T* structs = StartVectorOfStructs(vector_size); - for (size_t i = 0; i < vector_size; i++) { - filler(i, structs); - structs++; - } - return EndVectorOfStructs(vector_size); - } - #endif - // clang-format on - - /// @brief Serialize an array of structs into a FlatBuffer `vector`. - /// @tparam T The data type of the struct array elements. - /// @param[in] f A function that takes the current iteration 0..vector_size-1, - /// a pointer to the struct that must be filled and the state argument. 
- /// @param[in] state Arbitrary state to pass to f. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - /// This is mostly useful when flatbuffers are generated with mutation - /// accessors. - template - Offset> CreateVectorOfStructs(size_t vector_size, F f, - S *state) { - T *structs = StartVectorOfStructs(vector_size); - for (size_t i = 0; i < vector_size; i++) { - f(i, structs, state); - structs++; - } - return EndVectorOfStructs(vector_size); - } - - /// @brief Serialize a `std::vector` of structs into a FlatBuffer `vector`. - /// @tparam T The data type of the `std::vector` struct elements. - /// @param[in] v A const reference to the `std::vector` of structs to - /// serialize into the buffer as a `vector`. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template - Offset> CreateVectorOfStructs( - const std::vector &v) { - return CreateVectorOfStructs(data(v), v.size()); - } - - /// @brief Serialize a `std::vector` of native structs into a FlatBuffer - /// `vector`. - /// @tparam T The data type of the `std::vector` struct elements. - /// @tparam S The data type of the `std::vector` native struct elements. - /// @param[in] v A const reference to the `std::vector` of structs to - /// serialize into the buffer as a `vector`. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template - Offset> CreateVectorOfNativeStructs( - const std::vector &v) { - return CreateVectorOfNativeStructs(data(v), v.size()); - } - - /// @cond FLATBUFFERS_INTERNAL - template struct StructKeyComparator { - bool operator()(const T &a, const T &b) const { - return a.KeyCompareLessThan(&b); - } - - private: - StructKeyComparator &operator=(const StructKeyComparator &); - }; - /// @endcond - - /// @brief Serialize a `std::vector` of structs into a FlatBuffer `vector` - /// in sorted order. - /// @tparam T The data type of the `std::vector` struct elements. - /// @param[in] v A const reference to the `std::vector` of structs to - /// serialize into the buffer as a `vector`. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template - Offset> CreateVectorOfSortedStructs(std::vector *v) { - return CreateVectorOfSortedStructs(data(*v), v->size()); - } - - /// @brief Serialize a `std::vector` of native structs into a FlatBuffer - /// `vector` in sorted order. - /// @tparam T The data type of the `std::vector` struct elements. - /// @tparam S The data type of the `std::vector` native struct elements. - /// @param[in] v A const reference to the `std::vector` of structs to - /// serialize into the buffer as a `vector`. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template - Offset> CreateVectorOfSortedNativeStructs( - std::vector *v) { - return CreateVectorOfSortedNativeStructs(data(*v), v->size()); - } - - /// @brief Serialize an array of structs into a FlatBuffer `vector` in sorted - /// order. - /// @tparam T The data type of the struct array elements. - /// @param[in] v A pointer to the array of type `T` to serialize into the - /// buffer as a `vector`. - /// @param[in] len The number of elements to serialize. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. 
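// Sketch of the vector helpers documented above. `MyGame::Vec3` stands in for
// a schema-generated struct of three floats; the table field setters these
// offsets would feed into are omitted.
#include <string>
#include <vector>
#include "flatbuffers/flatbuffers.h"

inline void BuildVectors(flatbuffers::FlatBufferBuilder &fbb) {
  std::vector<std::string> names = {"axe", "bow", "club"};
  auto names_off = fbb.CreateVectorOfStrings(names);  // one String per element

  std::vector<MyGame::Vec3> path = {MyGame::Vec3(0, 0, 0), MyGame::Vec3(1, 2, 3)};
  auto path_off = fbb.CreateVectorOfStructs(path);    // structs are copied inline
  (void)names_off;
  (void)path_off;
}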
- template - Offset> CreateVectorOfSortedStructs(T *v, size_t len) { - std::sort(v, v + len, StructKeyComparator()); - return CreateVectorOfStructs(v, len); - } - - /// @brief Serialize an array of native structs into a FlatBuffer `vector` in - /// sorted order. - /// @tparam T The data type of the struct array elements. - /// @tparam S The data type of the native struct array elements. - /// @param[in] v A pointer to the array of type `S` to serialize into the - /// buffer as a `vector`. - /// @param[in] len The number of elements to serialize. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template - Offset> CreateVectorOfSortedNativeStructs(S *v, - size_t len) { - extern T Pack(const S &); - typedef T (*Pack_t)(const S &); - std::vector vv(len); - std::transform(v, v + len, vv.begin(), static_cast(Pack)); - return CreateVectorOfSortedStructs(vv, len); - } - - /// @cond FLATBUFFERS_INTERNAL - template struct TableKeyComparator { - TableKeyComparator(vector_downward &buf) : buf_(buf) {} - TableKeyComparator(const TableKeyComparator &other) : buf_(other.buf_) {} - bool operator()(const Offset &a, const Offset &b) const { - auto table_a = reinterpret_cast(buf_.data_at(a.o)); - auto table_b = reinterpret_cast(buf_.data_at(b.o)); - return table_a->KeyCompareLessThan(table_b); - } - vector_downward &buf_; - - private: - TableKeyComparator &operator=(const TableKeyComparator &other) { - buf_ = other.buf_; - return *this; - } - }; - /// @endcond - - /// @brief Serialize an array of `table` offsets as a `vector` in the buffer - /// in sorted order. - /// @tparam T The data type that the offset refers to. - /// @param[in] v An array of type `Offset` that contains the `table` - /// offsets to store in the buffer in sorted order. - /// @param[in] len The number of elements to store in the `vector`. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template - Offset>> CreateVectorOfSortedTables(Offset *v, - size_t len) { - std::sort(v, v + len, TableKeyComparator(buf_)); - return CreateVector(v, len); - } - - /// @brief Serialize an array of `table` offsets as a `vector` in the buffer - /// in sorted order. - /// @tparam T The data type that the offset refers to. - /// @param[in] v An array of type `Offset` that contains the `table` - /// offsets to store in the buffer in sorted order. - /// @return Returns a typed `Offset` into the serialized data indicating - /// where the vector is stored. - template - Offset>> CreateVectorOfSortedTables( - std::vector> *v) { - return CreateVectorOfSortedTables(data(*v), v->size()); - } - - /// @brief Specialized version of `CreateVector` for non-copying use cases. - /// Write the data any time later to the returned buffer pointer `buf`. - /// @param[in] len The number of elements to store in the `vector`. - /// @param[in] elemsize The size of each element in the `vector`. - /// @param[out] buf A pointer to a `uint8_t` pointer that can be - /// written to at a later time to serialize the data into a `vector` - /// in the buffer. - uoffset_t CreateUninitializedVector(size_t len, size_t elemsize, - uint8_t **buf) { - NotNested(); - StartVector(len, elemsize); - buf_.make_space(len * elemsize); - auto vec_start = GetSize(); - auto vec_end = EndVector(len); - *buf = buf_.data_at(vec_start); - return vec_end; - } - - /// @brief Specialized version of `CreateVector` for non-copying use cases. 
- /// Write the data any time later to the returned buffer pointer `buf`. - /// @tparam T The data type of the data that will be stored in the buffer - /// as a `vector`. - /// @param[in] len The number of elements to store in the `vector`. - /// @param[out] buf A pointer to a pointer of type `T` that can be - /// written to at a later time to serialize the data into a `vector` - /// in the buffer. - template - Offset> CreateUninitializedVector(size_t len, T **buf) { - AssertScalarT(); - return CreateUninitializedVector(len, sizeof(T), - reinterpret_cast(buf)); - } - - template - Offset> CreateUninitializedVectorOfStructs(size_t len, - T **buf) { - return CreateUninitializedVector(len, sizeof(T), - reinterpret_cast(buf)); - } - - // @brief Create a vector of scalar type T given as input a vector of scalar - // type U, useful with e.g. pre "enum class" enums, or any existing scalar - // data of the wrong type. - template - Offset> CreateVectorScalarCast(const U *v, size_t len) { - AssertScalarT(); - AssertScalarT(); - StartVector(len, sizeof(T)); - for (auto i = len; i > 0;) { PushElement(static_cast(v[--i])); } - return Offset>(EndVector(len)); - } - - /// @brief Write a struct by itself, typically to be part of a union. - template Offset CreateStruct(const T &structobj) { - NotNested(); - Align(AlignOf()); - buf_.push_small(structobj); - return Offset(GetSize()); - } - - /// @brief The length of a FlatBuffer file header. - static const size_t kFileIdentifierLength = 4; - - /// @brief Finish serializing a buffer by writing the root offset. - /// @param[in] file_identifier If a `file_identifier` is given, the buffer - /// will be prefixed with a standard FlatBuffers file header. - template - void Finish(Offset root, const char *file_identifier = nullptr) { - Finish(root.o, file_identifier, false); - } - - /// @brief Finish a buffer with a 32 bit size field pre-fixed (size of the - /// buffer following the size field). These buffers are NOT compatible - /// with standard buffers created by Finish, i.e. you can't call GetRoot - /// on them, you have to use GetSizePrefixedRoot instead. - /// All >32 bit quantities in this buffer will be aligned when the whole - /// size pre-fixed buffer is aligned. - /// These kinds of buffers are useful for creating a stream of FlatBuffers. - template - void FinishSizePrefixed(Offset root, - const char *file_identifier = nullptr) { - Finish(root.o, file_identifier, true); - } - - void SwapBufAllocator(FlatBufferBuilder &other) { - buf_.swap_allocator(other.buf_); - } - - protected: - // You shouldn't really be copying instances of this class. - FlatBufferBuilder(const FlatBufferBuilder &); - FlatBufferBuilder &operator=(const FlatBufferBuilder &); - - void Finish(uoffset_t root, const char *file_identifier, bool size_prefix) { - NotNested(); - buf_.clear_scratch(); - // This will cause the whole buffer to be aligned. - PreAlign((size_prefix ? sizeof(uoffset_t) : 0) + sizeof(uoffset_t) + - (file_identifier ? kFileIdentifierLength : 0), - minalign_); - if (file_identifier) { - FLATBUFFERS_ASSERT(strlen(file_identifier) == kFileIdentifierLength); - PushBytes(reinterpret_cast(file_identifier), - kFileIdentifierLength); - } - PushElement(ReferTo(root)); // Location of root. - if (size_prefix) { PushElement(GetSize()); } - finished = true; - } - - struct FieldLoc { - uoffset_t off; - voffset_t id; - }; - - vector_downward buf_; - - // Accumulating offsets of table members while it is being built. 
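// Sketch of the non-copying CreateUninitializedVector() described above: the
// vector is sized first and the caller fills the returned pointer afterwards.
// `payload`/`len` are illustrative names.
#include <cstring>
#include "flatbuffers/flatbuffers.h"

inline flatbuffers::Offset<flatbuffers::Vector<uint8_t>> PutPayload(
    flatbuffers::FlatBufferBuilder &fbb, const uint8_t *payload, size_t len) {
  uint8_t *dst = nullptr;
  auto off = fbb.CreateUninitializedVector(len, &dst);
  // Fill promptly: further builder calls may grow the buffer and invalidate dst.
  memcpy(dst, payload, len);
  return off;
}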
- // We store these in the scratch pad of buf_, after the vtable offsets. - uoffset_t num_field_loc; - // Track how much of the vtable is in use, so we can output the most compact - // possible vtable. - voffset_t max_voffset_; - - // Ensure objects are not nested. - bool nested; - - // Ensure the buffer is finished before it is being accessed. - bool finished; - - size_t minalign_; - - bool force_defaults_; // Serialize values equal to their defaults anyway. - - bool dedup_vtables_; - - struct StringOffsetCompare { - StringOffsetCompare(const vector_downward &buf) : buf_(&buf) {} - bool operator()(const Offset &a, const Offset &b) const { - auto stra = reinterpret_cast(buf_->data_at(a.o)); - auto strb = reinterpret_cast(buf_->data_at(b.o)); - return StringLessThan(stra->data(), stra->size(), strb->data(), - strb->size()); - } - const vector_downward *buf_; - }; - - // For use with CreateSharedString. Instantiated on first use only. - typedef std::set, StringOffsetCompare> StringOffsetMap; - StringOffsetMap *string_pool; - - private: - // Allocates space for a vector of structures. - // Must be completed with EndVectorOfStructs(). - template T *StartVectorOfStructs(size_t vector_size) { - StartVector(vector_size * sizeof(T) / AlignOf(), AlignOf()); - return reinterpret_cast(buf_.make_space(vector_size * sizeof(T))); - } - - // End the vector of structues in the flatbuffers. - // Vector should have previously be started with StartVectorOfStructs(). - template - Offset> EndVectorOfStructs(size_t vector_size) { - return Offset>(EndVector(vector_size)); - } -}; -/// @} - -/// @cond FLATBUFFERS_INTERNAL -// Helpers to get a typed pointer to the root object contained in the buffer. -template T *GetMutableRoot(void *buf) { - EndianCheck(); - return reinterpret_cast( - reinterpret_cast(buf) + - EndianScalar(*reinterpret_cast(buf))); -} - -template const T *GetRoot(const void *buf) { - return GetMutableRoot(const_cast(buf)); -} - -template const T *GetSizePrefixedRoot(const void *buf) { - return GetRoot(reinterpret_cast(buf) + sizeof(uoffset_t)); -} - -/// Helpers to get a typed pointer to objects that are currently being built. -/// @warning Creating new objects will lead to reallocations and invalidates -/// the pointer! -template -T *GetMutableTemporaryPointer(FlatBufferBuilder &fbb, Offset offset) { - return reinterpret_cast(fbb.GetCurrentBufferPointer() + fbb.GetSize() - - offset.o); -} - -template -const T *GetTemporaryPointer(FlatBufferBuilder &fbb, Offset offset) { - return GetMutableTemporaryPointer(fbb, offset); -} - -/// @brief Get a pointer to the the file_identifier section of the buffer. -/// @return Returns a const char pointer to the start of the file_identifier -/// characters in the buffer. The returned char * has length -/// 'flatbuffers::FlatBufferBuilder::kFileIdentifierLength'. -/// This function is UNDEFINED for FlatBuffers whose schema does not include -/// a file_identifier (likely points at padding or the start of a the root -/// vtable). -inline const char *GetBufferIdentifier(const void *buf, - bool size_prefixed = false) { - return reinterpret_cast(buf) + - ((size_prefixed) ? 2 * sizeof(uoffset_t) : sizeof(uoffset_t)); -} - -// Helper to see if the identifier in a buffer has the expected value. 
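// ---------------------------------------------------------------------------
// [Editorial illustration -- not part of the original header or of this diff]
// A minimal sketch of the usual read path, chaining GetRoot<T>() above with
// the Verifier defined just below. VerifyAndGetRoot is a hypothetical helper
// (not a library API); T is assumed to be a flatc-generated table type, since
// only generated tables provide the Verify() method that VerifyBuffer<T>
// calls. The file identifier may be nullptr if the schema declares none.
template<typename T>
const T *VerifyAndGetRoot(const uint8_t *buf, size_t len,
                          const char *file_identifier) {
  flatbuffers::Verifier verifier(buf, len);  // default depth/table limits
  if (!verifier.VerifyBuffer<T>(file_identifier)) return nullptr;
  return flatbuffers::GetRoot<T>(buf);
}
// Usage (hypothetical generated type and identifier):
//   auto *monster = VerifyAndGetRoot<MyGame::Monster>(buf, len, "MONS");
// ---------------------------------------------------------------------------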
-inline bool BufferHasIdentifier(const void *buf, const char *identifier, - bool size_prefixed = false) { - return strncmp(GetBufferIdentifier(buf, size_prefixed), identifier, - FlatBufferBuilder::kFileIdentifierLength) == 0; -} - -// Helper class to verify the integrity of a FlatBuffer -class Verifier FLATBUFFERS_FINAL_CLASS { - public: - Verifier(const uint8_t *buf, size_t buf_len, uoffset_t _max_depth = 64, - uoffset_t _max_tables = 1000000, bool _check_alignment = true) - : buf_(buf), - size_(buf_len), - depth_(0), - max_depth_(_max_depth), - num_tables_(0), - max_tables_(_max_tables), - upper_bound_(0), - check_alignment_(_check_alignment) { - FLATBUFFERS_ASSERT(size_ < FLATBUFFERS_MAX_BUFFER_SIZE); - } - - // Central location where any verification failures register. - bool Check(bool ok) const { - // clang-format off - #ifdef FLATBUFFERS_DEBUG_VERIFICATION_FAILURE - FLATBUFFERS_ASSERT(ok); - #endif - #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE - if (!ok) - upper_bound_ = 0; - #endif - // clang-format on - return ok; - } - - // Verify any range within the buffer. - bool Verify(size_t elem, size_t elem_len) const { - // clang-format off - #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE - auto upper_bound = elem + elem_len; - if (upper_bound_ < upper_bound) - upper_bound_ = upper_bound; - #endif - // clang-format on - return Check(elem_len < size_ && elem <= size_ - elem_len); - } - - template bool VerifyAlignment(size_t elem) const { - return Check((elem & (sizeof(T) - 1)) == 0 || !check_alignment_); - } - - // Verify a range indicated by sizeof(T). - template bool Verify(size_t elem) const { - return VerifyAlignment(elem) && Verify(elem, sizeof(T)); - } - - bool VerifyFromPointer(const uint8_t *p, size_t len) { - auto o = static_cast(p - buf_); - return Verify(o, len); - } - - // Verify relative to a known-good base pointer. - bool Verify(const uint8_t *base, voffset_t elem_off, size_t elem_len) const { - return Verify(static_cast(base - buf_) + elem_off, elem_len); - } - - template - bool Verify(const uint8_t *base, voffset_t elem_off) const { - return Verify(static_cast(base - buf_) + elem_off, sizeof(T)); - } - - // Verify a pointer (may be NULL) of a table type. - template bool VerifyTable(const T *table) { - return !table || table->Verify(*this); - } - - // Verify a pointer (may be NULL) of any vector type. - template bool VerifyVector(const Vector *vec) const { - return !vec || VerifyVectorOrString(reinterpret_cast(vec), - sizeof(T)); - } - - // Verify a pointer (may be NULL) of a vector to struct. - template bool VerifyVector(const Vector *vec) const { - return VerifyVector(reinterpret_cast *>(vec)); - } - - // Verify a pointer (may be NULL) to string. - bool VerifyString(const String *str) const { - size_t end; - return !str || (VerifyVectorOrString(reinterpret_cast(str), - 1, &end) && - Verify(end, 1) && // Must have terminator - Check(buf_[end] == '\0')); // Terminating byte must be 0. - } - - // Common code between vectors and strings. - bool VerifyVectorOrString(const uint8_t *vec, size_t elem_size, - size_t *end = nullptr) const { - auto veco = static_cast(vec - buf_); - // Check we can read the size field. - if (!Verify(veco)) return false; - // Check the whole array. If this is a string, the byte past the array - // must be 0. - auto size = ReadScalar(vec); - auto max_elems = FLATBUFFERS_MAX_BUFFER_SIZE / elem_size; - if (!Check(size < max_elems)) - return false; // Protect against byte_size overflowing. 
- auto byte_size = sizeof(size) + elem_size * size; - if (end) *end = veco + byte_size; - return Verify(veco, byte_size); - } - - // Special case for string contents, after the above has been called. - bool VerifyVectorOfStrings(const Vector> *vec) const { - if (vec) { - for (uoffset_t i = 0; i < vec->size(); i++) { - if (!VerifyString(vec->Get(i))) return false; - } - } - return true; - } - - // Special case for table contents, after the above has been called. - template bool VerifyVectorOfTables(const Vector> *vec) { - if (vec) { - for (uoffset_t i = 0; i < vec->size(); i++) { - if (!vec->Get(i)->Verify(*this)) return false; - } - } - return true; - } - - __supress_ubsan__("unsigned-integer-overflow") bool VerifyTableStart( - const uint8_t *table) { - // Check the vtable offset. - auto tableo = static_cast(table - buf_); - if (!Verify(tableo)) return false; - // This offset may be signed, but doing the subtraction unsigned always - // gives the result we want. - auto vtableo = tableo - static_cast(ReadScalar(table)); - // Check the vtable size field, then check vtable fits in its entirety. - return VerifyComplexity() && Verify(vtableo) && - VerifyAlignment(ReadScalar(buf_ + vtableo)) && - Verify(vtableo, ReadScalar(buf_ + vtableo)); - } - - template - bool VerifyBufferFromStart(const char *identifier, size_t start) { - if (identifier && (size_ < 2 * sizeof(flatbuffers::uoffset_t) || - !BufferHasIdentifier(buf_ + start, identifier))) { - return false; - } - - // Call T::Verify, which must be in the generated code for this type. - auto o = VerifyOffset(start); - return o && reinterpret_cast(buf_ + start + o)->Verify(*this) - // clang-format off - #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE - && GetComputedSize() - #endif - ; - // clang-format on - } - - // Verify this whole buffer, starting with root type T. - template bool VerifyBuffer() { return VerifyBuffer(nullptr); } - - template bool VerifyBuffer(const char *identifier) { - return VerifyBufferFromStart(identifier, 0); - } - - template bool VerifySizePrefixedBuffer(const char *identifier) { - return Verify(0U) && - ReadScalar(buf_) == size_ - sizeof(uoffset_t) && - VerifyBufferFromStart(identifier, sizeof(uoffset_t)); - } - - uoffset_t VerifyOffset(size_t start) const { - if (!Verify(start)) return 0; - auto o = ReadScalar(buf_ + start); - // May not point to itself. - if (!Check(o != 0)) return 0; - // Can't wrap around / buffers are max 2GB. - if (!Check(static_cast(o) >= 0)) return 0; - // Must be inside the buffer to create a pointer from it (pointer outside - // buffer is UB). - if (!Verify(start + o, 1)) return 0; - return o; - } - - uoffset_t VerifyOffset(const uint8_t *base, voffset_t start) const { - return VerifyOffset(static_cast(base - buf_) + start); - } - - // Called at the start of a table to increase counters measuring data - // structure depth and amount, and possibly bails out with false if - // limits set by the constructor have been hit. Needs to be balanced - // with EndTable(). - bool VerifyComplexity() { - depth_++; - num_tables_++; - return Check(depth_ <= max_depth_ && num_tables_ <= max_tables_); - } - - // Called at the end of a table to pop the depth count. 
- bool EndTable() { - depth_--; - return true; - } - - // Returns the message size in bytes - size_t GetComputedSize() const { - // clang-format off - #ifdef FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE - uintptr_t size = upper_bound_; - // Align the size to uoffset_t - size = (size - 1 + sizeof(uoffset_t)) & ~(sizeof(uoffset_t) - 1); - return (size > size_) ? 0 : size; - #else - // Must turn on FLATBUFFERS_TRACK_VERIFIER_BUFFER_SIZE for this to work. - (void)upper_bound_; - FLATBUFFERS_ASSERT(false); - return 0; - #endif - // clang-format on - } - - private: - const uint8_t *buf_; - size_t size_; - uoffset_t depth_; - uoffset_t max_depth_; - uoffset_t num_tables_; - uoffset_t max_tables_; - mutable size_t upper_bound_; - bool check_alignment_; -}; - -// Convenient way to bundle a buffer and its length, to pass it around -// typed by its root. -// A BufferRef does not own its buffer. -struct BufferRefBase {}; // for std::is_base_of -template struct BufferRef : BufferRefBase { - BufferRef() : buf(nullptr), len(0), must_free(false) {} - BufferRef(uint8_t *_buf, uoffset_t _len) - : buf(_buf), len(_len), must_free(false) {} - - ~BufferRef() { - if (must_free) free(buf); - } - - const T *GetRoot() const { return flatbuffers::GetRoot(buf); } - - bool Verify() { - Verifier verifier(buf, len); - return verifier.VerifyBuffer(nullptr); - } - - uint8_t *buf; - uoffset_t len; - bool must_free; -}; - -// "structs" are flat structures that do not have an offset table, thus -// always have all members present and do not support forwards/backwards -// compatible extensions. - -class Struct FLATBUFFERS_FINAL_CLASS { - public: - template T GetField(uoffset_t o) const { - return ReadScalar(&data_[o]); - } - - template T GetStruct(uoffset_t o) const { - return reinterpret_cast(&data_[o]); - } - - const uint8_t *GetAddressOf(uoffset_t o) const { return &data_[o]; } - uint8_t *GetAddressOf(uoffset_t o) { return &data_[o]; } - - private: - // private constructor & copy constructor: you obtain instances of this - // class by pointing to existing data only - Struct(); - Struct(const Struct &); - Struct &operator=(const Struct &); - - uint8_t data_[1]; -}; - -// "tables" use an offset table (possibly shared) that allows fields to be -// omitted and added at will, but uses an extra indirection to read. -class Table { - public: - const uint8_t *GetVTable() const { - return data_ - ReadScalar(data_); - } - - // This gets the field offset for any of the functions below it, or 0 - // if the field was not present. - voffset_t GetOptionalFieldOffset(voffset_t field) const { - // The vtable offset is always at the start. - auto vtable = GetVTable(); - // The first element is the size of the vtable (fields + type id + itself). - auto vtsize = ReadScalar(vtable); - // If the field we're accessing is outside the vtable, we're reading older - // data, so it's the same as if the offset was 0 (not present). - return field < vtsize ? ReadScalar(vtable + field) : 0; - } - - template T GetField(voffset_t field, T defaultval) const { - auto field_offset = GetOptionalFieldOffset(field); - return field_offset ? ReadScalar(data_ + field_offset) : defaultval; - } - - template P GetPointer(voffset_t field) { - auto field_offset = GetOptionalFieldOffset(field); - auto p = data_ + field_offset; - return field_offset ? reinterpret_cast
<P>(p + ReadScalar<uoffset_t>(p)) - : nullptr; - } - template<typename P> P GetPointer(voffset_t field) const { - return const_cast<Table *>(this)->GetPointer<P>
(field); - } - - template<typename P> P GetStruct(voffset_t field) const { - auto field_offset = GetOptionalFieldOffset(field); - auto p = const_cast<uint8_t *>(data_ + field_offset); - return field_offset ? reinterpret_cast
<P>(p) : nullptr; - } - - template<typename T> bool SetField(voffset_t field, T val, T def) { - auto field_offset = GetOptionalFieldOffset(field); - if (!field_offset) return IsTheSameAs(val, def); - WriteScalar(data_ + field_offset, val); - return true; - } - - bool SetPointer(voffset_t field, const uint8_t *val) { - auto field_offset = GetOptionalFieldOffset(field); - if (!field_offset) return false; - WriteScalar(data_ + field_offset, - static_cast<uoffset_t>(val - (data_ + field_offset))); - return true; - } - - uint8_t *GetAddressOf(voffset_t field) { - auto field_offset = GetOptionalFieldOffset(field); - return field_offset ? data_ + field_offset : nullptr; - } - const uint8_t *GetAddressOf(voffset_t field) const { - return const_cast
<Table *>
(this)->GetAddressOf(field); - } - - bool CheckField(voffset_t field) const { - return GetOptionalFieldOffset(field) != 0; - } - - // Verify the vtable of this table. - // Call this once per table, followed by VerifyField once per field. - bool VerifyTableStart(Verifier &verifier) const { - return verifier.VerifyTableStart(data_); - } - - // Verify a particular field. - template - bool VerifyField(const Verifier &verifier, voffset_t field) const { - // Calling GetOptionalFieldOffset should be safe now thanks to - // VerifyTable(). - auto field_offset = GetOptionalFieldOffset(field); - // Check the actual field. - return !field_offset || verifier.Verify(data_, field_offset); - } - - // VerifyField for required fields. - template - bool VerifyFieldRequired(const Verifier &verifier, voffset_t field) const { - auto field_offset = GetOptionalFieldOffset(field); - return verifier.Check(field_offset != 0) && - verifier.Verify(data_, field_offset); - } - - // Versions for offsets. - bool VerifyOffset(const Verifier &verifier, voffset_t field) const { - auto field_offset = GetOptionalFieldOffset(field); - return !field_offset || verifier.VerifyOffset(data_, field_offset); - } - - bool VerifyOffsetRequired(const Verifier &verifier, voffset_t field) const { - auto field_offset = GetOptionalFieldOffset(field); - return verifier.Check(field_offset != 0) && - verifier.VerifyOffset(data_, field_offset); - } - - private: - // private constructor & copy constructor: you obtain instances of this - // class by pointing to existing data only - Table(); - Table(const Table &other); - Table &operator=(const Table &); - - uint8_t data_[1]; -}; - -template -void FlatBufferBuilder::Required(Offset table, voffset_t field) { - auto table_ptr = reinterpret_cast(buf_.data_at(table.o)); - bool ok = table_ptr->GetOptionalFieldOffset(field) != 0; - // If this fails, the caller will show what field needs to be set. - FLATBUFFERS_ASSERT(ok); - (void)ok; -} - -/// @brief This can compute the start of a FlatBuffer from a root pointer, i.e. -/// it is the opposite transformation of GetRoot(). -/// This may be useful if you want to pass on a root and have the recipient -/// delete the buffer afterwards. -inline const uint8_t *GetBufferStartFromRootPointer(const void *root) { - auto table = reinterpret_cast(root); - auto vtable = table->GetVTable(); - // Either the vtable is before the root or after the root. - auto start = (std::min)(vtable, reinterpret_cast(root)); - // Align to at least sizeof(uoffset_t). - start = reinterpret_cast(reinterpret_cast(start) & - ~(sizeof(uoffset_t) - 1)); - // Additionally, there may be a file_identifier in the buffer, and the root - // offset. The buffer may have been aligned to any size between - // sizeof(uoffset_t) and FLATBUFFERS_MAX_ALIGNMENT (see "force_align"). - // Sadly, the exact alignment is only known when constructing the buffer, - // since it depends on the presence of values with said alignment properties. - // So instead, we simply look at the next uoffset_t values (root, - // file_identifier, and alignment padding) to see which points to the root. - // None of the other values can "impersonate" the root since they will either - // be 0 or four ASCII characters. 
- static_assert(FlatBufferBuilder::kFileIdentifierLength == sizeof(uoffset_t), - "file_identifier is assumed to be the same size as uoffset_t"); - for (auto possible_roots = FLATBUFFERS_MAX_ALIGNMENT / sizeof(uoffset_t) + 1; - possible_roots; possible_roots--) { - start -= sizeof(uoffset_t); - if (ReadScalar(start) + start == - reinterpret_cast(root)) - return start; - } - // We didn't find the root, either the "root" passed isn't really a root, - // or the buffer is corrupt. - // CHECK, because calling this function with bad data may cause reads - // outside of buffer boundaries. - FLATBUFFERS_ASSERT(false); - return nullptr; -} - -/// @brief This return the prefixed size of a FlatBuffer. -inline uoffset_t GetPrefixedSize(const uint8_t *buf) { - return ReadScalar(buf); -} - -// Base class for native objects (FlatBuffer data de-serialized into native -// C++ data structures). -// Contains no functionality, purely documentative. -struct NativeTable {}; - -/// @brief Function types to be used with resolving hashes into objects and -/// back again. The resolver gets a pointer to a field inside an object API -/// object that is of the type specified in the schema using the attribute -/// `cpp_type` (it is thus important whatever you write to this address -/// matches that type). The value of this field is initially null, so you -/// may choose to implement a delayed binding lookup using this function -/// if you wish. The resolver does the opposite lookup, for when the object -/// is being serialized again. -typedef uint64_t hash_value_t; -// clang-format off -#ifdef FLATBUFFERS_CPP98_STL - typedef void (*resolver_function_t)(void **pointer_adr, hash_value_t hash); - typedef hash_value_t (*rehasher_function_t)(void *pointer); -#else - typedef std::function - resolver_function_t; - typedef std::function rehasher_function_t; -#endif -// clang-format on - -// Helper function to test if a field is present, using any of the field -// enums in the generated code. -// `table` must be a generated table type. Since this is a template parameter, -// this is not typechecked to be a subclass of Table, so beware! -// Note: this function will return false for fields equal to the default -// value, since they're not stored in the buffer (unless force_defaults was -// used). -template -bool IsFieldPresent(const T *table, typename T::FlatBuffersVTableOffset field) { - // Cast, since Table is a private baseclass of any table types. - return reinterpret_cast(table)->CheckField( - static_cast(field)); -} - -// Utility function for reverse lookups on the EnumNames*() functions -// (in the generated C++ code) -// names must be NULL terminated. -inline int LookupEnum(const char **names, const char *name) { - for (const char **p = names; *p; p++) - if (!strcmp(*p, name)) return static_cast(p - names); - return -1; -} - -// These macros allow us to layout a struct with a guarantee that they'll end -// up looking the same on different compilers and platforms. -// It does this by disallowing the compiler to do any padding, and then -// does padding itself by inserting extra padding fields that make every -// element aligned to its own size. -// Additionally, it manually sets the alignment of the struct as a whole, -// which is typically its largest element, or a custom size set in the schema -// by the force_align attribute. -// These are used in the generated code only. 
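// ---------------------------------------------------------------------------
// [Editorial illustration -- not part of the original header or of this diff]
// A hedged sketch of what flatc emits for a hypothetical schema struct
// "struct Vec3 { x:float; y:float; z:float; }", using the two macros defined
// immediately below: packing is disabled, the struct as a whole is given an
// explicit alignment, and FLATBUFFERS_STRUCT_END statically checks the final
// size. Shown in comment form since the generated shape is simplified here.
//
//   FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(4) Vec3 FLATBUFFERS_FINAL_CLASS {
//    private:
//     float x_;
//     float y_;
//     float z_;
//
//    public:
//     Vec3() : x_(0), y_(0), z_(0) {}
//     Vec3(float _x, float _y, float _z)
//         : x_(flatbuffers::EndianScalar(_x)),
//           y_(flatbuffers::EndianScalar(_y)),
//           z_(flatbuffers::EndianScalar(_z)) {}
//     float x() const { return flatbuffers::EndianScalar(x_); }
//     float y() const { return flatbuffers::EndianScalar(y_); }
//     float z() const { return flatbuffers::EndianScalar(z_); }
//   };
//   FLATBUFFERS_STRUCT_END(Vec3, 12);
// ---------------------------------------------------------------------------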
- -// clang-format off -#if defined(_MSC_VER) - #define FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(alignment) \ - __pragma(pack(1)) \ - struct __declspec(align(alignment)) - #define FLATBUFFERS_STRUCT_END(name, size) \ - __pragma(pack()) \ - static_assert(sizeof(name) == size, "compiler breaks packing rules") -#elif defined(__GNUC__) || defined(__clang__) || defined(__ICCARM__) - #define FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(alignment) \ - _Pragma("pack(1)") \ - struct __attribute__((aligned(alignment))) - #define FLATBUFFERS_STRUCT_END(name, size) \ - _Pragma("pack()") \ - static_assert(sizeof(name) == size, "compiler breaks packing rules") -#else - #error Unknown compiler, please define structure alignment macros -#endif -// clang-format on - -// Minimal reflection via code generation. -// Besides full-fat reflection (see reflection.h) and parsing/printing by -// loading schemas (see idl.h), we can also have code generation for mimimal -// reflection data which allows pretty-printing and other uses without needing -// a schema or a parser. -// Generate code with --reflect-types (types only) or --reflect-names (names -// also) to enable. -// See minireflect.h for utilities using this functionality. - -// These types are organized slightly differently as the ones in idl.h. -enum SequenceType { ST_TABLE, ST_STRUCT, ST_UNION, ST_ENUM }; - -// Scalars have the same order as in idl.h -// clang-format off -#define FLATBUFFERS_GEN_ELEMENTARY_TYPES(ET) \ - ET(ET_UTYPE) \ - ET(ET_BOOL) \ - ET(ET_CHAR) \ - ET(ET_UCHAR) \ - ET(ET_SHORT) \ - ET(ET_USHORT) \ - ET(ET_INT) \ - ET(ET_UINT) \ - ET(ET_LONG) \ - ET(ET_ULONG) \ - ET(ET_FLOAT) \ - ET(ET_DOUBLE) \ - ET(ET_STRING) \ - ET(ET_SEQUENCE) // See SequenceType. - -enum ElementaryType { - #define FLATBUFFERS_ET(E) E, - FLATBUFFERS_GEN_ELEMENTARY_TYPES(FLATBUFFERS_ET) - #undef FLATBUFFERS_ET -}; - -inline const char * const *ElementaryTypeNames() { - static const char * const names[] = { - #define FLATBUFFERS_ET(E) #E, - FLATBUFFERS_GEN_ELEMENTARY_TYPES(FLATBUFFERS_ET) - #undef FLATBUFFERS_ET - }; - return names; -} -// clang-format on - -// Basic type info cost just 16bits per field! -struct TypeCode { - uint16_t base_type : 4; // ElementaryType - uint16_t is_vector : 1; - int16_t sequence_ref : 11; // Index into type_refs below, or -1 for none. -}; - -static_assert(sizeof(TypeCode) == 2, "TypeCode"); - -struct TypeTable; - -// Signature of the static method present in each type. -typedef const TypeTable *(*TypeFunction)(); - -struct TypeTable { - SequenceType st; - size_t num_elems; // of type_codes, values, names (but not type_refs). - const TypeCode *type_codes; // num_elems count - const TypeFunction *type_refs; // less than num_elems entries (see TypeCode). - const int64_t *values; // Only set for non-consecutive enum/union or structs. - const char *const *names; // Only set if compiled with --reflect-names. -}; - -// String which identifies the current version of FlatBuffers. -// flatbuffer_version_string is used by Google developers to identify which -// applications uploaded to Google Play are using this library. This allows -// the development team at Google to determine the popularity of the library. -// How it works: Applications that are uploaded to the Google Play Store are -// scanned for this version string. We track which applications are using it -// to measure popularity. You are free to remove it (of course) but we would -// appreciate if you left it in. - -// Weak linkage is culled by VS & doesn't work on cygwin. 
-// clang-format off -#if !defined(_WIN32) && !defined(__CYGWIN__) - -extern volatile __attribute__((weak)) const char *flatbuffer_version_string; -volatile __attribute__((weak)) const char *flatbuffer_version_string = - "FlatBuffers " - FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MAJOR) "." - FLATBUFFERS_STRING(FLATBUFFERS_VERSION_MINOR) "." - FLATBUFFERS_STRING(FLATBUFFERS_VERSION_REVISION); - -#endif // !defined(_WIN32) && !defined(__CYGWIN__) - -#define FLATBUFFERS_DEFINE_BITMASK_OPERATORS(E, T)\ - inline E operator | (E lhs, E rhs){\ - return E(T(lhs) | T(rhs));\ - }\ - inline E operator & (E lhs, E rhs){\ - return E(T(lhs) & T(rhs));\ - }\ - inline E operator ^ (E lhs, E rhs){\ - return E(T(lhs) ^ T(rhs));\ - }\ - inline E operator ~ (E lhs){\ - return E(~T(lhs));\ - }\ - inline E operator |= (E &lhs, E rhs){\ - lhs = lhs | rhs;\ - return lhs;\ - }\ - inline E operator &= (E &lhs, E rhs){\ - lhs = lhs & rhs;\ - return lhs;\ - }\ - inline E operator ^= (E &lhs, E rhs){\ - lhs = lhs ^ rhs;\ - return lhs;\ - }\ - inline bool operator !(E rhs) \ - {\ - return !bool(T(rhs)); \ - } -/// @endcond -} // namespace flatbuffers - -// clang-format on - -#endif // FLATBUFFERS_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/flexbuffers.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/flexbuffers.h deleted file mode 100644 index 9d67c0e8..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/flexbuffers.h +++ /dev/null @@ -1,1617 +0,0 @@ -/* - * Copyright 2017 Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef FLATBUFFERS_FLEXBUFFERS_H_ -#define FLATBUFFERS_FLEXBUFFERS_H_ - -#include -// Used to select STL variant. -#include "flatbuffers/base.h" -// We use the basic binary writing functions from the regular FlatBuffers. -#include "flatbuffers/util.h" - -#ifdef _MSC_VER -# include -#endif - -#if defined(_MSC_VER) -# pragma warning(push) -# pragma warning(disable : 4127) // C4127: conditional expression is constant -#endif - -namespace flexbuffers { - -class Reference; -class Map; - -// These are used in the lower 2 bits of a type field to determine the size of -// the elements (and or size field) of the item pointed to (e.g. vector). -enum BitWidth { - BIT_WIDTH_8 = 0, - BIT_WIDTH_16 = 1, - BIT_WIDTH_32 = 2, - BIT_WIDTH_64 = 3, -}; - -// These are used as the upper 6 bits of a type field to indicate the actual -// type. -enum Type { - FBT_NULL = 0, - FBT_INT = 1, - FBT_UINT = 2, - FBT_FLOAT = 3, - // Types above stored inline, types below store an offset. - FBT_KEY = 4, - FBT_STRING = 5, - FBT_INDIRECT_INT = 6, - FBT_INDIRECT_UINT = 7, - FBT_INDIRECT_FLOAT = 8, - FBT_MAP = 9, - FBT_VECTOR = 10, // Untyped. - FBT_VECTOR_INT = 11, // Typed any size (stores no type table). 
- FBT_VECTOR_UINT = 12, - FBT_VECTOR_FLOAT = 13, - FBT_VECTOR_KEY = 14, - // DEPRECATED, use FBT_VECTOR or FBT_VECTOR_KEY instead. - // Read test.cpp/FlexBuffersDeprecatedTest() for details on why. - FBT_VECTOR_STRING_DEPRECATED = 15, - FBT_VECTOR_INT2 = 16, // Typed tuple (no type table, no size field). - FBT_VECTOR_UINT2 = 17, - FBT_VECTOR_FLOAT2 = 18, - FBT_VECTOR_INT3 = 19, // Typed triple (no type table, no size field). - FBT_VECTOR_UINT3 = 20, - FBT_VECTOR_FLOAT3 = 21, - FBT_VECTOR_INT4 = 22, // Typed quad (no type table, no size field). - FBT_VECTOR_UINT4 = 23, - FBT_VECTOR_FLOAT4 = 24, - FBT_BLOB = 25, - FBT_BOOL = 26, - FBT_VECTOR_BOOL = - 36, // To Allow the same type of conversion of type to vector type -}; - -inline bool IsInline(Type t) { return t <= FBT_FLOAT || t == FBT_BOOL; } - -inline bool IsTypedVectorElementType(Type t) { - return (t >= FBT_INT && t <= FBT_STRING) || t == FBT_BOOL; -} - -inline bool IsTypedVector(Type t) { - return (t >= FBT_VECTOR_INT && t <= FBT_VECTOR_STRING_DEPRECATED) || - t == FBT_VECTOR_BOOL; -} - -inline bool IsFixedTypedVector(Type t) { - return t >= FBT_VECTOR_INT2 && t <= FBT_VECTOR_FLOAT4; -} - -inline Type ToTypedVector(Type t, size_t fixed_len = 0) { - FLATBUFFERS_ASSERT(IsTypedVectorElementType(t)); - switch (fixed_len) { - case 0: return static_cast(t - FBT_INT + FBT_VECTOR_INT); - case 2: return static_cast(t - FBT_INT + FBT_VECTOR_INT2); - case 3: return static_cast(t - FBT_INT + FBT_VECTOR_INT3); - case 4: return static_cast(t - FBT_INT + FBT_VECTOR_INT4); - default: FLATBUFFERS_ASSERT(0); return FBT_NULL; - } -} - -inline Type ToTypedVectorElementType(Type t) { - FLATBUFFERS_ASSERT(IsTypedVector(t)); - return static_cast(t - FBT_VECTOR_INT + FBT_INT); -} - -inline Type ToFixedTypedVectorElementType(Type t, uint8_t *len) { - FLATBUFFERS_ASSERT(IsFixedTypedVector(t)); - auto fixed_type = t - FBT_VECTOR_INT2; - *len = static_cast(fixed_type / 3 + - 2); // 3 types each, starting from length 2. - return static_cast(fixed_type % 3 + FBT_INT); -} - -// TODO: implement proper support for 8/16bit floats, or decide not to -// support them. -typedef int16_t half; -typedef int8_t quarter; - -// TODO: can we do this without conditionals using intrinsics or inline asm -// on some platforms? Given branch prediction the method below should be -// decently quick, but it is the most frequently executed function. -// We could do an (unaligned) 64-bit read if we ifdef out the platforms for -// which that doesn't work (or where we'd read into un-owned memory). -template -R ReadSizedScalar(const uint8_t *data, uint8_t byte_width) { - return byte_width < 4 - ? (byte_width < 2 - ? static_cast(flatbuffers::ReadScalar(data)) - : static_cast(flatbuffers::ReadScalar(data))) - : (byte_width < 8 - ? static_cast(flatbuffers::ReadScalar(data)) - : static_cast(flatbuffers::ReadScalar(data))); -} - -inline int64_t ReadInt64(const uint8_t *data, uint8_t byte_width) { - return ReadSizedScalar( - data, byte_width); -} - -inline uint64_t ReadUInt64(const uint8_t *data, uint8_t byte_width) { - // This is the "hottest" function (all offset lookups use this), so worth - // optimizing if possible. - // TODO: GCC apparently replaces memcpy by a rep movsb, but only if count is a - // constant, which here it isn't. Test if memcpy is still faster than - // the conditionals in ReadSizedScalar. Can also use inline asm. 
- // clang-format off - #if defined(_MSC_VER) && (defined(_M_X64) || defined _M_IX86) - uint64_t u = 0; - __movsb(reinterpret_cast(&u), - reinterpret_cast(data), byte_width); - return flatbuffers::EndianScalar(u); - #else - return ReadSizedScalar( - data, byte_width); - #endif - // clang-format on -} - -inline double ReadDouble(const uint8_t *data, uint8_t byte_width) { - return ReadSizedScalar(data, - byte_width); -} - -inline const uint8_t *Indirect(const uint8_t *offset, uint8_t byte_width) { - return offset - ReadUInt64(offset, byte_width); -} - -template const uint8_t *Indirect(const uint8_t *offset) { - return offset - flatbuffers::ReadScalar(offset); -} - -inline BitWidth WidthU(uint64_t u) { -#define FLATBUFFERS_GET_FIELD_BIT_WIDTH(value, width) \ - { \ - if (!((u) & ~((1ULL << (width)) - 1ULL))) return BIT_WIDTH_##width; \ - } - FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 8); - FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 16); - FLATBUFFERS_GET_FIELD_BIT_WIDTH(u, 32); -#undef FLATBUFFERS_GET_FIELD_BIT_WIDTH - return BIT_WIDTH_64; -} - -inline BitWidth WidthI(int64_t i) { - auto u = static_cast(i) << 1; - return WidthU(i >= 0 ? u : ~u); -} - -inline BitWidth WidthF(double f) { - return static_cast(static_cast(f)) == f ? BIT_WIDTH_32 - : BIT_WIDTH_64; -} - -// Base class of all types below. -// Points into the data buffer and allows access to one type. -class Object { - public: - Object(const uint8_t *data, uint8_t byte_width) - : data_(data), byte_width_(byte_width) {} - - protected: - const uint8_t *data_; - uint8_t byte_width_; -}; - -// Object that has a size, obtained either from size prefix, or elsewhere. -class Sized : public Object { - public: - // Size prefix. - Sized(const uint8_t *data, uint8_t byte_width) - : Object(data, byte_width), size_(read_size()) {} - // Manual size. - Sized(const uint8_t *data, uint8_t byte_width, size_t sz) - : Object(data, byte_width), size_(sz) {} - size_t size() const { return size_; } - // Access size stored in `byte_width_` bytes before data_ pointer. - size_t read_size() const { - return static_cast(ReadUInt64(data_ - byte_width_, byte_width_)); - } - - protected: - size_t size_; -}; - -class String : public Sized { - public: - // Size prefix. - String(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {} - // Manual size. 
- String(const uint8_t *data, uint8_t byte_width, size_t sz) - : Sized(data, byte_width, sz) {} - - size_t length() const { return size(); } - const char *c_str() const { return reinterpret_cast(data_); } - std::string str() const { return std::string(c_str(), size()); } - - static String EmptyString() { - static const char *empty_string = ""; - return String(reinterpret_cast(empty_string), 1, 0); - } - bool IsTheEmptyString() const { return data_ == EmptyString().data_; } -}; - -class Blob : public Sized { - public: - Blob(const uint8_t *data_buf, uint8_t byte_width) - : Sized(data_buf, byte_width) {} - - static Blob EmptyBlob() { - static const uint8_t empty_blob[] = { 0 /*len*/ }; - return Blob(empty_blob + 1, 1); - } - bool IsTheEmptyBlob() const { return data_ == EmptyBlob().data_; } - const uint8_t *data() const { return data_; } -}; - -class Vector : public Sized { - public: - Vector(const uint8_t *data, uint8_t byte_width) : Sized(data, byte_width) {} - - Reference operator[](size_t i) const; - - static Vector EmptyVector() { - static const uint8_t empty_vector[] = { 0 /*len*/ }; - return Vector(empty_vector + 1, 1); - } - bool IsTheEmptyVector() const { return data_ == EmptyVector().data_; } -}; - -class TypedVector : public Sized { - public: - TypedVector(const uint8_t *data, uint8_t byte_width, Type element_type) - : Sized(data, byte_width), type_(element_type) {} - - Reference operator[](size_t i) const; - - static TypedVector EmptyTypedVector() { - static const uint8_t empty_typed_vector[] = { 0 /*len*/ }; - return TypedVector(empty_typed_vector + 1, 1, FBT_INT); - } - bool IsTheEmptyVector() const { - return data_ == TypedVector::EmptyTypedVector().data_; - } - - Type ElementType() { return type_; } - - friend Reference; - - private: - Type type_; - - friend Map; -}; - -class FixedTypedVector : public Object { - public: - FixedTypedVector(const uint8_t *data, uint8_t byte_width, Type element_type, - uint8_t len) - : Object(data, byte_width), type_(element_type), len_(len) {} - - Reference operator[](size_t i) const; - - static FixedTypedVector EmptyFixedTypedVector() { - static const uint8_t fixed_empty_vector[] = { 0 /* unused */ }; - return FixedTypedVector(fixed_empty_vector, 1, FBT_INT, 0); - } - bool IsTheEmptyFixedTypedVector() const { - return data_ == FixedTypedVector::EmptyFixedTypedVector().data_; - } - - Type ElementType() { return type_; } - uint8_t size() { return len_; } - - private: - Type type_; - uint8_t len_; -}; - -class Map : public Vector { - public: - Map(const uint8_t *data, uint8_t byte_width) : Vector(data, byte_width) {} - - Reference operator[](const char *key) const; - Reference operator[](const std::string &key) const; - - Vector Values() const { return Vector(data_, byte_width_); } - - TypedVector Keys() const { - const size_t num_prefixed_fields = 3; - auto keys_offset = data_ - byte_width_ * num_prefixed_fields; - return TypedVector(Indirect(keys_offset, byte_width_), - static_cast( - ReadUInt64(keys_offset + byte_width_, byte_width_)), - FBT_KEY); - } - - static Map EmptyMap() { - static const uint8_t empty_map[] = { - 0 /*keys_len*/, 0 /*keys_offset*/, 1 /*keys_width*/, 0 /*len*/ - }; - return Map(empty_map + 4, 1); - } - - bool IsTheEmptyMap() const { return data_ == EmptyMap().data_; } -}; - -template -void AppendToString(std::string &s, T &&v, bool keys_quoted) { - s += "[ "; - for (size_t i = 0; i < v.size(); i++) { - if (i) s += ", "; - v[i].ToString(true, keys_quoted, s); - } - s += " ]"; -} - -class Reference { - public: - Reference() 
- : data_(nullptr), - parent_width_(0), - byte_width_(BIT_WIDTH_8), - type_(FBT_NULL) {} - - Reference(const uint8_t *data, uint8_t parent_width, uint8_t byte_width, - Type type) - : data_(data), - parent_width_(parent_width), - byte_width_(byte_width), - type_(type) {} - - Reference(const uint8_t *data, uint8_t parent_width, uint8_t packed_type) - : data_(data), parent_width_(parent_width) { - byte_width_ = 1U << static_cast(packed_type & 3); - type_ = static_cast(packed_type >> 2); - } - - Type GetType() const { return type_; } - - bool IsNull() const { return type_ == FBT_NULL; } - bool IsBool() const { return type_ == FBT_BOOL; } - bool IsInt() const { return type_ == FBT_INT || type_ == FBT_INDIRECT_INT; } - bool IsUInt() const { - return type_ == FBT_UINT || type_ == FBT_INDIRECT_UINT; - } - bool IsIntOrUint() const { return IsInt() || IsUInt(); } - bool IsFloat() const { - return type_ == FBT_FLOAT || type_ == FBT_INDIRECT_FLOAT; - } - bool IsNumeric() const { return IsIntOrUint() || IsFloat(); } - bool IsString() const { return type_ == FBT_STRING; } - bool IsKey() const { return type_ == FBT_KEY; } - bool IsVector() const { return type_ == FBT_VECTOR || type_ == FBT_MAP; } - bool IsUntypedVector() const { return type_ == FBT_VECTOR; } - bool IsTypedVector() const { return flexbuffers::IsTypedVector(type_); } - bool IsFixedTypedVector() const { - return flexbuffers::IsFixedTypedVector(type_); - } - bool IsAnyVector() const { - return (IsTypedVector() || IsFixedTypedVector() || IsVector()); - } - bool IsMap() const { return type_ == FBT_MAP; } - bool IsBlob() const { return type_ == FBT_BLOB; } - bool AsBool() const { - return (type_ == FBT_BOOL ? ReadUInt64(data_, parent_width_) - : AsUInt64()) != 0; - } - - // Reads any type as a int64_t. Never fails, does most sensible conversion. - // Truncates floats, strings are attempted to be parsed for a number, - // vectors/maps return their size. Returns 0 if all else fails. - int64_t AsInt64() const { - if (type_ == FBT_INT) { - // A fast path for the common case. - return ReadInt64(data_, parent_width_); - } else - switch (type_) { - case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_); - case FBT_UINT: return ReadUInt64(data_, parent_width_); - case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_); - case FBT_FLOAT: - return static_cast(ReadDouble(data_, parent_width_)); - case FBT_INDIRECT_FLOAT: - return static_cast(ReadDouble(Indirect(), byte_width_)); - case FBT_NULL: return 0; - case FBT_STRING: return flatbuffers::StringToInt(AsString().c_str()); - case FBT_VECTOR: return static_cast(AsVector().size()); - case FBT_BOOL: return ReadInt64(data_, parent_width_); - default: - // Convert other things to int. - return 0; - } - } - - // TODO: could specialize these to not use AsInt64() if that saves - // extension ops in generated code, and use a faster op than ReadInt64. - int32_t AsInt32() const { return static_cast(AsInt64()); } - int16_t AsInt16() const { return static_cast(AsInt64()); } - int8_t AsInt8() const { return static_cast(AsInt64()); } - - uint64_t AsUInt64() const { - if (type_ == FBT_UINT) { - // A fast path for the common case. 
- return ReadUInt64(data_, parent_width_); - } else - switch (type_) { - case FBT_INDIRECT_UINT: return ReadUInt64(Indirect(), byte_width_); - case FBT_INT: return ReadInt64(data_, parent_width_); - case FBT_INDIRECT_INT: return ReadInt64(Indirect(), byte_width_); - case FBT_FLOAT: - return static_cast(ReadDouble(data_, parent_width_)); - case FBT_INDIRECT_FLOAT: - return static_cast(ReadDouble(Indirect(), byte_width_)); - case FBT_NULL: return 0; - case FBT_STRING: return flatbuffers::StringToUInt(AsString().c_str()); - case FBT_VECTOR: return static_cast(AsVector().size()); - case FBT_BOOL: return ReadUInt64(data_, parent_width_); - default: - // Convert other things to uint. - return 0; - } - } - - uint32_t AsUInt32() const { return static_cast(AsUInt64()); } - uint16_t AsUInt16() const { return static_cast(AsUInt64()); } - uint8_t AsUInt8() const { return static_cast(AsUInt64()); } - - double AsDouble() const { - if (type_ == FBT_FLOAT) { - // A fast path for the common case. - return ReadDouble(data_, parent_width_); - } else - switch (type_) { - case FBT_INDIRECT_FLOAT: return ReadDouble(Indirect(), byte_width_); - case FBT_INT: - return static_cast(ReadInt64(data_, parent_width_)); - case FBT_UINT: - return static_cast(ReadUInt64(data_, parent_width_)); - case FBT_INDIRECT_INT: - return static_cast(ReadInt64(Indirect(), byte_width_)); - case FBT_INDIRECT_UINT: - return static_cast(ReadUInt64(Indirect(), byte_width_)); - case FBT_NULL: return 0.0; - case FBT_STRING: { - double d; - flatbuffers::StringToNumber(AsString().c_str(), &d); - return d; - } - case FBT_VECTOR: return static_cast(AsVector().size()); - case FBT_BOOL: - return static_cast(ReadUInt64(data_, parent_width_)); - default: - // Convert strings and other things to float. - return 0; - } - } - - float AsFloat() const { return static_cast(AsDouble()); } - - const char *AsKey() const { - if (type_ == FBT_KEY || type_ == FBT_STRING) { - return reinterpret_cast(Indirect()); - } else { - return ""; - } - } - - // This function returns the empty string if you try to read something that - // is not a string or key. - String AsString() const { - if (type_ == FBT_STRING) { - return String(Indirect(), byte_width_); - } else if (type_ == FBT_KEY) { - auto key = Indirect(); - return String(key, byte_width_, - strlen(reinterpret_cast(key))); - } else { - return String::EmptyString(); - } - } - - // Unlike AsString(), this will convert any type to a std::string. - std::string ToString() const { - std::string s; - ToString(false, false, s); - return s; - } - - // Convert any type to a JSON-like string. strings_quoted determines if - // string values at the top level receive "" quotes (inside other values - // they always do). keys_quoted determines if keys are quoted, at any level. - // TODO(wvo): add further options to have indentation/newlines. 
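// ---------------------------------------------------------------------------
// [Editorial illustration -- not part of the original header or of this diff]
// A minimal read-side sketch (assuming "flatbuffers/flexbuffers.h" and
// <string> are available): GetRoot() is declared further below, and
// ToString() is the function that follows this note. For a buffer whose root
// is the map { foo: 13, name: "x" }, the output is roughly as commented.
inline void ReadExample(const uint8_t *buffer, size_t size) {
  auto root = flexbuffers::GetRoot(buffer, size);
  if (!root.IsMap()) return;
  auto map = root.AsMap();
  int64_t foo = map["foo"].AsInt64();               // 0 if the key is missing
  std::string name = map["name"].AsString().str();
  std::string json = root.ToString();   // roughly: { foo: 13, name: "x" }
  (void)foo; (void)name; (void)json;
}
// ---------------------------------------------------------------------------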
- void ToString(bool strings_quoted, bool keys_quoted, std::string &s) const { - if (type_ == FBT_STRING) { - String str(Indirect(), byte_width_); - if (strings_quoted) { - flatbuffers::EscapeString(str.c_str(), str.length(), &s, true, false); - } else { - s.append(str.c_str(), str.length()); - } - } else if (IsKey()) { - auto str = AsKey(); - if (keys_quoted) { - flatbuffers::EscapeString(str, strlen(str), &s, true, false); - } else { - s += str; - } - } else if (IsInt()) { - s += flatbuffers::NumToString(AsInt64()); - } else if (IsUInt()) { - s += flatbuffers::NumToString(AsUInt64()); - } else if (IsFloat()) { - s += flatbuffers::NumToString(AsDouble()); - } else if (IsNull()) { - s += "null"; - } else if (IsBool()) { - s += AsBool() ? "true" : "false"; - } else if (IsMap()) { - s += "{ "; - auto m = AsMap(); - auto keys = m.Keys(); - auto vals = m.Values(); - for (size_t i = 0; i < keys.size(); i++) { - keys[i].ToString(true, keys_quoted, s); - s += ": "; - vals[i].ToString(true, keys_quoted, s); - if (i < keys.size() - 1) s += ", "; - } - s += " }"; - } else if (IsVector()) { - AppendToString(s, AsVector(), keys_quoted); - } else if (IsTypedVector()) { - AppendToString(s, AsTypedVector(), keys_quoted); - } else if (IsFixedTypedVector()) { - AppendToString(s, AsFixedTypedVector(), keys_quoted); - } else if (IsBlob()) { - auto blob = AsBlob(); - flatbuffers::EscapeString(reinterpret_cast(blob.data()), - blob.size(), &s, true, false); - } else { - s += "(?)"; - } - } - - // This function returns the empty blob if you try to read a not-blob. - // Strings can be viewed as blobs too. - Blob AsBlob() const { - if (type_ == FBT_BLOB || type_ == FBT_STRING) { - return Blob(Indirect(), byte_width_); - } else { - return Blob::EmptyBlob(); - } - } - - // This function returns the empty vector if you try to read a not-vector. - // Maps can be viewed as vectors too. - Vector AsVector() const { - if (type_ == FBT_VECTOR || type_ == FBT_MAP) { - return Vector(Indirect(), byte_width_); - } else { - return Vector::EmptyVector(); - } - } - - TypedVector AsTypedVector() const { - if (IsTypedVector()) { - auto tv = - TypedVector(Indirect(), byte_width_, ToTypedVectorElementType(type_)); - if (tv.type_ == FBT_STRING) { - // These can't be accessed as strings, since we don't know the bit-width - // of the size field, see the declaration of - // FBT_VECTOR_STRING_DEPRECATED above for details. - // We change the type here to be keys, which are a subtype of strings, - // and will ignore the size field. This will truncate strings with - // embedded nulls. - tv.type_ = FBT_KEY; - } - return tv; - } else { - return TypedVector::EmptyTypedVector(); - } - } - - FixedTypedVector AsFixedTypedVector() const { - if (IsFixedTypedVector()) { - uint8_t len = 0; - auto vtype = ToFixedTypedVectorElementType(type_, &len); - return FixedTypedVector(Indirect(), byte_width_, vtype, len); - } else { - return FixedTypedVector::EmptyFixedTypedVector(); - } - } - - Map AsMap() const { - if (type_ == FBT_MAP) { - return Map(Indirect(), byte_width_); - } else { - return Map::EmptyMap(); - } - } - - template T As() const; - - // Experimental: Mutation functions. - // These allow scalars in an already created buffer to be updated in-place. - // Since by default scalars are stored in the smallest possible space, - // the new value may not fit, in which case these functions return false. - // To avoid this, you can construct the values you intend to mutate using - // Builder::ForceMinimumBitWidth. 
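// ---------------------------------------------------------------------------
// [Editorial illustration -- not part of the original header or of this diff]
// A hedged sketch of the in-place mutation described above (assuming
// "flatbuffers/flexbuffers.h" and <vector> are available; Builder is defined
// later in this file): the write succeeds only if the new value fits the
// storage width chosen at build time, which is why the value is built after
// ForceMinimumBitWidth().
inline void MutateExample() {
  flexbuffers::Builder fbb;
  fbb.ForceMinimumBitWidth(flexbuffers::BIT_WIDTH_64);  // reserve 64-bit slots
  fbb.Map([&]() { fbb.Int("counter", 0); });
  fbb.Finish();
  std::vector<uint8_t> buffer = fbb.GetBuffer();  // copy so we may mutate it

  auto root = flexbuffers::GetRoot(buffer);
  bool ok = root.AsMap()["counter"].MutateInt(1234567890123LL);
  (void)ok;  // expected to be true here, since 64 bits were reserved
}
// ---------------------------------------------------------------------------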
- bool MutateInt(int64_t i) { - if (type_ == FBT_INT) { - return Mutate(data_, i, parent_width_, WidthI(i)); - } else if (type_ == FBT_INDIRECT_INT) { - return Mutate(Indirect(), i, byte_width_, WidthI(i)); - } else if (type_ == FBT_UINT) { - auto u = static_cast(i); - return Mutate(data_, u, parent_width_, WidthU(u)); - } else if (type_ == FBT_INDIRECT_UINT) { - auto u = static_cast(i); - return Mutate(Indirect(), u, byte_width_, WidthU(u)); - } else { - return false; - } - } - - bool MutateBool(bool b) { - return type_ == FBT_BOOL && Mutate(data_, b, parent_width_, BIT_WIDTH_8); - } - - bool MutateUInt(uint64_t u) { - if (type_ == FBT_UINT) { - return Mutate(data_, u, parent_width_, WidthU(u)); - } else if (type_ == FBT_INDIRECT_UINT) { - return Mutate(Indirect(), u, byte_width_, WidthU(u)); - } else if (type_ == FBT_INT) { - auto i = static_cast(u); - return Mutate(data_, i, parent_width_, WidthI(i)); - } else if (type_ == FBT_INDIRECT_INT) { - auto i = static_cast(u); - return Mutate(Indirect(), i, byte_width_, WidthI(i)); - } else { - return false; - } - } - - bool MutateFloat(float f) { - if (type_ == FBT_FLOAT) { - return MutateF(data_, f, parent_width_, BIT_WIDTH_32); - } else if (type_ == FBT_INDIRECT_FLOAT) { - return MutateF(Indirect(), f, byte_width_, BIT_WIDTH_32); - } else { - return false; - } - } - - bool MutateFloat(double d) { - if (type_ == FBT_FLOAT) { - return MutateF(data_, d, parent_width_, WidthF(d)); - } else if (type_ == FBT_INDIRECT_FLOAT) { - return MutateF(Indirect(), d, byte_width_, WidthF(d)); - } else { - return false; - } - } - - bool MutateString(const char *str, size_t len) { - auto s = AsString(); - if (s.IsTheEmptyString()) return false; - // This is very strict, could allow shorter strings, but that creates - // garbage. - if (s.length() != len) return false; - memcpy(const_cast(s.c_str()), str, len); - return true; - } - bool MutateString(const char *str) { return MutateString(str, strlen(str)); } - bool MutateString(const std::string &str) { - return MutateString(str.data(), str.length()); - } - - private: - const uint8_t *Indirect() const { - return flexbuffers::Indirect(data_, parent_width_); - } - - template - bool Mutate(const uint8_t *dest, T t, size_t byte_width, - BitWidth value_width) { - auto fits = static_cast(static_cast(1U) << value_width) <= - byte_width; - if (fits) { - t = flatbuffers::EndianScalar(t); - memcpy(const_cast(dest), &t, byte_width); - } - return fits; - } - - template - bool MutateF(const uint8_t *dest, T t, size_t byte_width, - BitWidth value_width) { - if (byte_width == sizeof(double)) - return Mutate(dest, static_cast(t), byte_width, value_width); - if (byte_width == sizeof(float)) - return Mutate(dest, static_cast(t), byte_width, value_width); - FLATBUFFERS_ASSERT(false); - return false; - } - - const uint8_t *data_; - uint8_t parent_width_; - uint8_t byte_width_; - Type type_; -}; - -// Template specialization for As(). 
-template<> inline bool Reference::As() const { return AsBool(); } - -template<> inline int8_t Reference::As() const { return AsInt8(); } -template<> inline int16_t Reference::As() const { return AsInt16(); } -template<> inline int32_t Reference::As() const { return AsInt32(); } -template<> inline int64_t Reference::As() const { return AsInt64(); } - -template<> inline uint8_t Reference::As() const { return AsUInt8(); } -template<> inline uint16_t Reference::As() const { - return AsUInt16(); -} -template<> inline uint32_t Reference::As() const { - return AsUInt32(); -} -template<> inline uint64_t Reference::As() const { - return AsUInt64(); -} - -template<> inline double Reference::As() const { return AsDouble(); } -template<> inline float Reference::As() const { return AsFloat(); } - -template<> inline String Reference::As() const { return AsString(); } -template<> inline std::string Reference::As() const { - return AsString().str(); -} - -template<> inline Blob Reference::As() const { return AsBlob(); } -template<> inline Vector Reference::As() const { return AsVector(); } -template<> inline TypedVector Reference::As() const { - return AsTypedVector(); -} -template<> inline FixedTypedVector Reference::As() const { - return AsFixedTypedVector(); -} -template<> inline Map Reference::As() const { return AsMap(); } - -inline uint8_t PackedType(BitWidth bit_width, Type type) { - return static_cast(bit_width | (type << 2)); -} - -inline uint8_t NullPackedType() { return PackedType(BIT_WIDTH_8, FBT_NULL); } - -// Vector accessors. -// Note: if you try to access outside of bounds, you get a Null value back -// instead. Normally this would be an CHECK, but since this is "dynamically -// typed" data, you may not want that (someone sends you a 2d vector and you -// wanted 3d). -// The Null converts seamlessly into a default value for any other type. -// TODO(wvo): Could introduce an #ifdef that makes this into an CHECK? -inline Reference Vector::operator[](size_t i) const { - auto len = size(); - if (i >= len) return Reference(nullptr, 1, NullPackedType()); - auto packed_type = (data_ + len * byte_width_)[i]; - auto elem = data_ + i * byte_width_; - return Reference(elem, byte_width_, packed_type); -} - -inline Reference TypedVector::operator[](size_t i) const { - auto len = size(); - if (i >= len) return Reference(nullptr, 1, NullPackedType()); - auto elem = data_ + i * byte_width_; - return Reference(elem, byte_width_, 1, type_); -} - -inline Reference FixedTypedVector::operator[](size_t i) const { - if (i >= len_) return Reference(nullptr, 1, NullPackedType()); - auto elem = data_ + i * byte_width_; - return Reference(elem, byte_width_, 1, type_); -} - -template int KeyCompare(const void *key, const void *elem) { - auto str_elem = reinterpret_cast( - Indirect(reinterpret_cast(elem))); - auto skey = reinterpret_cast(key); - return strcmp(skey, str_elem); -} - -inline Reference Map::operator[](const char *key) const { - auto keys = Keys(); - // We can't pass keys.byte_width_ to the comparison function, so we have - // to pick the right one ahead of time. 
- int (*comp)(const void *, const void *) = nullptr; - switch (keys.byte_width_) { - case 1: comp = KeyCompare; break; - case 2: comp = KeyCompare; break; - case 4: comp = KeyCompare; break; - case 8: comp = KeyCompare; break; - } - auto res = std::bsearch(key, keys.data_, keys.size(), keys.byte_width_, comp); - if (!res) return Reference(nullptr, 1, NullPackedType()); - auto i = (reinterpret_cast(res) - keys.data_) / keys.byte_width_; - return (*static_cast(this))[i]; -} - -inline Reference Map::operator[](const std::string &key) const { - return (*this)[key.c_str()]; -} - -inline Reference GetRoot(const uint8_t *buffer, size_t size) { - // See Finish() below for the serialization counterpart of this. - // The root starts at the end of the buffer, so we parse backwards from there. - auto end = buffer + size; - auto byte_width = *--end; - auto packed_type = *--end; - end -= byte_width; // The root data item. - return Reference(end, byte_width, packed_type); -} - -inline Reference GetRoot(const std::vector &buffer) { - return GetRoot(flatbuffers::vector_data(buffer), buffer.size()); -} - -// Flags that configure how the Builder behaves. -// The "Share" flags determine if the Builder automatically tries to pool -// this type. Pooling can reduce the size of serialized data if there are -// multiple maps of the same kind, at the expense of slightly slower -// serialization (the cost of lookups) and more memory use (std::set). -// By default this is on for keys, but off for strings. -// Turn keys off if you have e.g. only one map. -// Turn strings on if you expect many non-unique string values. -// Additionally, sharing key vectors can save space if you have maps with -// identical field populations. -enum BuilderFlag { - BUILDER_FLAG_NONE = 0, - BUILDER_FLAG_SHARE_KEYS = 1, - BUILDER_FLAG_SHARE_STRINGS = 2, - BUILDER_FLAG_SHARE_KEYS_AND_STRINGS = 3, - BUILDER_FLAG_SHARE_KEY_VECTORS = 4, - BUILDER_FLAG_SHARE_ALL = 7, -}; - -class Builder FLATBUFFERS_FINAL_CLASS { - public: - Builder(size_t initial_size = 256, - BuilderFlag flags = BUILDER_FLAG_SHARE_KEYS) - : buf_(initial_size), - finished_(false), - flags_(flags), - force_min_bit_width_(BIT_WIDTH_8), - key_pool(KeyOffsetCompare(buf_)), - string_pool(StringOffsetCompare(buf_)) { - buf_.clear(); - } - - /// @brief Get the serialized buffer (after you call `Finish()`). - /// @return Returns a vector owned by this class. - const std::vector &GetBuffer() const { - Finished(); - return buf_; - } - - // Size of the buffer. Does not include unfinished values. - size_t GetSize() const { return buf_.size(); } - - // Reset all state so we can re-use the buffer. - void Clear() { - buf_.clear(); - stack_.clear(); - finished_ = false; - // flags_ remains as-is; - force_min_bit_width_ = BIT_WIDTH_8; - key_pool.clear(); - string_pool.clear(); - } - - // All value constructing functions below have two versions: one that - // takes a key (for placement inside a map) and one that doesn't (for inside - // vectors and elsewhere). 
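// ---------------------------------------------------------------------------
// [Editorial illustration -- not part of the original header or of this diff]
// A minimal sketch of the two flavors described above (assuming
// "flatbuffers/flexbuffers.h" is available): keyed overloads go between
// StartMap()/EndMap(), un-keyed ones between StartVector()/EndVector(); the
// lambda-based Map()/Vector() helpers further below wrap those calls.
inline void BuildExample() {
  flexbuffers::Builder fbb;
  fbb.Map([&]() {                  // root map
    fbb.Int("answer", 42);         // keyed value inside the map
    fbb.Vector("primes", [&]() {   // keyed vector
      fbb.Int(2);                  // un-keyed elements inside the vector
      fbb.Int(3);
      fbb.Int(5);
    });
    fbb.String("name", "flexbuffers");
  });
  fbb.Finish();
  const std::vector<uint8_t> &buf = fbb.GetBuffer();
  (void)buf;
}
// ---------------------------------------------------------------------------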
- - void Null() { stack_.push_back(Value()); } - void Null(const char *key) { - Key(key); - Null(); - } - - void Int(int64_t i) { stack_.push_back(Value(i, FBT_INT, WidthI(i))); } - void Int(const char *key, int64_t i) { - Key(key); - Int(i); - } - - void UInt(uint64_t u) { stack_.push_back(Value(u, FBT_UINT, WidthU(u))); } - void UInt(const char *key, uint64_t u) { - Key(key); - UInt(u); - } - - void Float(float f) { stack_.push_back(Value(f)); } - void Float(const char *key, float f) { - Key(key); - Float(f); - } - - void Double(double f) { stack_.push_back(Value(f)); } - void Double(const char *key, double d) { - Key(key); - Double(d); - } - - void Bool(bool b) { stack_.push_back(Value(b)); } - void Bool(const char *key, bool b) { - Key(key); - Bool(b); - } - - void IndirectInt(int64_t i) { PushIndirect(i, FBT_INDIRECT_INT, WidthI(i)); } - void IndirectInt(const char *key, int64_t i) { - Key(key); - IndirectInt(i); - } - - void IndirectUInt(uint64_t u) { - PushIndirect(u, FBT_INDIRECT_UINT, WidthU(u)); - } - void IndirectUInt(const char *key, uint64_t u) { - Key(key); - IndirectUInt(u); - } - - void IndirectFloat(float f) { - PushIndirect(f, FBT_INDIRECT_FLOAT, BIT_WIDTH_32); - } - void IndirectFloat(const char *key, float f) { - Key(key); - IndirectFloat(f); - } - - void IndirectDouble(double f) { - PushIndirect(f, FBT_INDIRECT_FLOAT, WidthF(f)); - } - void IndirectDouble(const char *key, double d) { - Key(key); - IndirectDouble(d); - } - - size_t Key(const char *str, size_t len) { - auto sloc = buf_.size(); - WriteBytes(str, len + 1); - if (flags_ & BUILDER_FLAG_SHARE_KEYS) { - auto it = key_pool.find(sloc); - if (it != key_pool.end()) { - // Already in the buffer. Remove key we just serialized, and use - // existing offset instead. - buf_.resize(sloc); - sloc = *it; - } else { - key_pool.insert(sloc); - } - } - stack_.push_back(Value(static_cast(sloc), FBT_KEY, BIT_WIDTH_8)); - return sloc; - } - - size_t Key(const char *str) { return Key(str, strlen(str)); } - size_t Key(const std::string &str) { return Key(str.c_str(), str.size()); } - - size_t String(const char *str, size_t len) { - auto reset_to = buf_.size(); - auto sloc = CreateBlob(str, len, 1, FBT_STRING); - if (flags_ & BUILDER_FLAG_SHARE_STRINGS) { - StringOffset so(sloc, len); - auto it = string_pool.find(so); - if (it != string_pool.end()) { - // Already in the buffer. Remove string we just serialized, and use - // existing offset instead. - buf_.resize(reset_to); - sloc = it->first; - stack_.back().u_ = sloc; - } else { - string_pool.insert(so); - } - } - return sloc; - } - size_t String(const char *str) { return String(str, strlen(str)); } - size_t String(const std::string &str) { - return String(str.c_str(), str.size()); - } - void String(const flexbuffers::String &str) { - String(str.c_str(), str.length()); - } - - void String(const char *key, const char *str) { - Key(key); - String(str); - } - void String(const char *key, const std::string &str) { - Key(key); - String(str); - } - void String(const char *key, const flexbuffers::String &str) { - Key(key); - String(str); - } - - size_t Blob(const void *data, size_t len) { - return CreateBlob(data, len, 0, FBT_BLOB); - } - size_t Blob(const std::vector &v) { - return CreateBlob(flatbuffers::vector_data(v), v.size(), 0, FBT_BLOB); - } - - // TODO(wvo): support all the FlexBuffer types (like flexbuffers::String), - // e.g. Vector etc. Also in overloaded versions. - // Also some FlatBuffers types? 
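// ---------------------------------------------------------------------------
// [Editorial illustration -- not part of the original header or of this diff]
// A hedged sketch of the vector flavors (assuming "flatbuffers/flexbuffers.h"
// is available): passing a scalar array produces a typed vector (one element
// type for the whole vector, the FBT_VECTOR_* types above), while
// FixedTypedVector() with 2 to 4 elements additionally drops the length field
// (FBT_VECTOR_*2/3/4).
inline void VectorExample() {
  flexbuffers::Builder fbb;
  fbb.Map([&]() {
    int ints[] = { 1, 2, 3 };
    fbb.Vector("ints", ints, 3);          // typed int vector
    float xyz[] = { 1.0f, 2.0f, 3.0f };
    fbb.FixedTypedVector("xyz", xyz, 3);  // fixed: no per-vector length field
  });
  fbb.Finish();
}
// ---------------------------------------------------------------------------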
- - size_t StartVector() { return stack_.size(); } - size_t StartVector(const char *key) { - Key(key); - return stack_.size(); - } - size_t StartMap() { return stack_.size(); } - size_t StartMap(const char *key) { - Key(key); - return stack_.size(); - } - - // TODO(wvo): allow this to specify an aligment greater than the natural - // alignment. - size_t EndVector(size_t start, bool typed, bool fixed) { - auto vec = CreateVector(start, stack_.size() - start, 1, typed, fixed); - // Remove temp elements and return vector. - stack_.resize(start); - stack_.push_back(vec); - return static_cast(vec.u_); - } - - size_t EndMap(size_t start) { - // We should have interleaved keys and values on the stack. - // Make sure it is an even number: - auto len = stack_.size() - start; - FLATBUFFERS_ASSERT(!(len & 1)); - len /= 2; - // Make sure keys are all strings: - for (auto key = start; key < stack_.size(); key += 2) { - FLATBUFFERS_ASSERT(stack_[key].type_ == FBT_KEY); - } - // Now sort values, so later we can do a binary search lookup. - // We want to sort 2 array elements at a time. - struct TwoValue { - Value key; - Value val; - }; - // TODO(wvo): strict aliasing? - // TODO(wvo): allow the caller to indicate the data is already sorted - // for maximum efficiency? With an CHECK to check sortedness to make sure - // we're not breaking binary search. - // Or, we can track if the map is sorted as keys are added which would be - // be quite cheap (cheaper than checking it here), so we can skip this - // step automatically when appliccable, and encourage people to write in - // sorted fashion. - // std::sort is typically already a lot faster on sorted data though. - auto dict = - reinterpret_cast(flatbuffers::vector_data(stack_) + start); - std::sort(dict, dict + len, - [&](const TwoValue &a, const TwoValue &b) -> bool { - auto as = reinterpret_cast( - flatbuffers::vector_data(buf_) + a.key.u_); - auto bs = reinterpret_cast( - flatbuffers::vector_data(buf_) + b.key.u_); - auto comp = strcmp(as, bs); - // If this assertion hits, you've added two keys with the same - // value to this map. - // TODO: Have to check for pointer equality, as some sort - // implementation apparently call this function with the same - // element?? Why? - FLATBUFFERS_ASSERT(comp || &a == &b); - return comp < 0; - }); - // First create a vector out of all keys. - // TODO(wvo): if kBuilderFlagShareKeyVectors is true, see if we can share - // the first vector. - auto keys = CreateVector(start, len, 2, true, false); - auto vec = CreateVector(start + 1, len, 2, false, false, &keys); - // Remove temp elements and return map. - stack_.resize(start); - stack_.push_back(vec); - return static_cast(vec.u_); - } - - template size_t Vector(F f) { - auto start = StartVector(); - f(); - return EndVector(start, false, false); - } - template size_t Vector(F f, T &state) { - auto start = StartVector(); - f(state); - return EndVector(start, false, false); - } - template size_t Vector(const char *key, F f) { - auto start = StartVector(key); - f(); - return EndVector(start, false, false); - } - template - size_t Vector(const char *key, F f, T &state) { - auto start = StartVector(key); - f(state); - return EndVector(start, false, false); - } - - template void Vector(const T *elems, size_t len) { - if (flatbuffers::is_scalar::value) { - // This path should be a lot quicker and use less space. 
- ScalarVector(elems, len, false); - } else { - auto start = StartVector(); - for (size_t i = 0; i < len; i++) Add(elems[i]); - EndVector(start, false, false); - } - } - template - void Vector(const char *key, const T *elems, size_t len) { - Key(key); - Vector(elems, len); - } - template void Vector(const std::vector &vec) { - Vector(flatbuffers::vector_data(vec), vec.size()); - } - - template size_t TypedVector(F f) { - auto start = StartVector(); - f(); - return EndVector(start, true, false); - } - template size_t TypedVector(F f, T &state) { - auto start = StartVector(); - f(state); - return EndVector(start, true, false); - } - template size_t TypedVector(const char *key, F f) { - auto start = StartVector(key); - f(); - return EndVector(start, true, false); - } - template - size_t TypedVector(const char *key, F f, T &state) { - auto start = StartVector(key); - f(state); - return EndVector(start, true, false); - } - - template size_t FixedTypedVector(const T *elems, size_t len) { - // We only support a few fixed vector lengths. Anything bigger use a - // regular typed vector. - FLATBUFFERS_ASSERT(len >= 2 && len <= 4); - // And only scalar values. - static_assert(flatbuffers::is_scalar::value, "Unrelated types"); - return ScalarVector(elems, len, true); - } - - template - size_t FixedTypedVector(const char *key, const T *elems, size_t len) { - Key(key); - return FixedTypedVector(elems, len); - } - - template size_t Map(F f) { - auto start = StartMap(); - f(); - return EndMap(start); - } - template size_t Map(F f, T &state) { - auto start = StartMap(); - f(state); - return EndMap(start); - } - template size_t Map(const char *key, F f) { - auto start = StartMap(key); - f(); - return EndMap(start); - } - template size_t Map(const char *key, F f, T &state) { - auto start = StartMap(key); - f(state); - return EndMap(start); - } - template void Map(const std::map &map) { - auto start = StartMap(); - for (auto it = map.begin(); it != map.end(); ++it) - Add(it->first.c_str(), it->second); - EndMap(start); - } - - // If you wish to share a value explicitly (a value not shared automatically - // through one of the BUILDER_FLAG_SHARE_* flags) you can do so with these - // functions. Or if you wish to turn those flags off for performance reasons - // and still do some explicit sharing. For example: - // builder.IndirectDouble(M_PI); - // auto id = builder.LastValue(); // Remember where we stored it. - // .. more code goes here .. - // builder.ReuseValue(id); // Refers to same double by offset. - // LastValue works regardless of wether the value has a key or not. - // Works on any data type. - struct Value; - Value LastValue() { return stack_.back(); } - void ReuseValue(Value v) { stack_.push_back(v); } - void ReuseValue(const char *key, Value v) { - Key(key); - ReuseValue(v); - } - - // Overloaded Add that tries to call the correct function above. 
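A sketch of the explicit-sharing idiom from the comment above (key names are invented): LastValue() captures the offset of the value just pushed, and ReuseValue() pushes another reference to the same stored bytes.

    inline void ShareExample() {
      flexbuffers::Builder fbb;
      fbb.Map([&]() {
        fbb.IndirectDouble("pi", 3.141592653589793);
        auto id = fbb.LastValue();        // remember where the double was stored
        fbb.ReuseValue("pi_again", id);   // second entry points at the same payload
      });
      fbb.Finish();
    }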
- void Add(int8_t i) { Int(i); } - void Add(int16_t i) { Int(i); } - void Add(int32_t i) { Int(i); } - void Add(int64_t i) { Int(i); } - void Add(uint8_t u) { UInt(u); } - void Add(uint16_t u) { UInt(u); } - void Add(uint32_t u) { UInt(u); } - void Add(uint64_t u) { UInt(u); } - void Add(float f) { Float(f); } - void Add(double d) { Double(d); } - void Add(bool b) { Bool(b); } - void Add(const char *str) { String(str); } - void Add(const std::string &str) { String(str); } - void Add(const flexbuffers::String &str) { String(str); } - - template void Add(const std::vector &vec) { Vector(vec); } - - template void Add(const char *key, const T &t) { - Key(key); - Add(t); - } - - template void Add(const std::map &map) { - Map(map); - } - - template void operator+=(const T &t) { Add(t); } - - // This function is useful in combination with the Mutate* functions above. - // It forces elements of vectors and maps to have a minimum size, such that - // they can later be updated without failing. - // Call with no arguments to reset. - void ForceMinimumBitWidth(BitWidth bw = BIT_WIDTH_8) { - force_min_bit_width_ = bw; - } - - void Finish() { - // If you hit this CHECK, you likely have objects that were never included - // in a parent. You need to have exactly one root to finish a buffer. - // Check your Start/End calls are matched, and all objects are inside - // some other object. - FLATBUFFERS_ASSERT(stack_.size() == 1); - - // Write root value. - auto byte_width = Align(stack_[0].ElemWidth(buf_.size(), 0)); - WriteAny(stack_[0], byte_width); - // Write root type. - Write(stack_[0].StoredPackedType(), 1); - // Write root size. Normally determined by parent, but root has no parent :) - Write(byte_width, 1); - - finished_ = true; - } - - private: - void Finished() const { - // If you get this CHECK, you're attempting to get access a buffer - // which hasn't been finished yet. Be sure to call - // Builder::Finish with your root object. - FLATBUFFERS_ASSERT(finished_); - } - - // Align to prepare for writing a scalar with a certain size. 
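A hedged sketch combining ForceMinimumBitWidth() with the Mutate* accessors referred to above (Reference::MutateInt() is defined earlier in this header; the values are arbitrary): forcing 32-bit storage leaves headroom so a later in-place update cannot overflow its slot.

    inline void MutateExample() {
      flexbuffers::Builder fbb;
      fbb.ForceMinimumBitWidth(flexbuffers::BIT_WIDTH_32);
      fbb.Vector([&]() { fbb.Int(1); fbb.Int(2); fbb.Int(3); });
      fbb.Finish();
      std::vector<uint8_t> buf = fbb.GetBuffer();          // own a mutable copy
      auto vec = flexbuffers::GetRoot(buf).AsVector();
      vec[0].MutateInt(100000);   // fits: every element was written 32 bits wide
    }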
- uint8_t Align(BitWidth alignment) { - auto byte_width = 1U << alignment; - buf_.insert(buf_.end(), flatbuffers::PaddingBytes(buf_.size(), byte_width), - 0); - return static_cast(byte_width); - } - - void WriteBytes(const void *val, size_t size) { - buf_.insert(buf_.end(), reinterpret_cast(val), - reinterpret_cast(val) + size); - } - - template void Write(T val, size_t byte_width) { - FLATBUFFERS_ASSERT(sizeof(T) >= byte_width); - val = flatbuffers::EndianScalar(val); - WriteBytes(&val, byte_width); - } - - void WriteDouble(double f, uint8_t byte_width) { - switch (byte_width) { - case 8: Write(f, byte_width); break; - case 4: Write(static_cast(f), byte_width); break; - // case 2: Write(static_cast(f), byte_width); break; - // case 1: Write(static_cast(f), byte_width); break; - default: FLATBUFFERS_ASSERT(0); - } - } - - void WriteOffset(uint64_t o, uint8_t byte_width) { - auto reloff = buf_.size() - o; - FLATBUFFERS_ASSERT(byte_width == 8 || reloff < 1ULL << (byte_width * 8)); - Write(reloff, byte_width); - } - - template void PushIndirect(T val, Type type, BitWidth bit_width) { - auto byte_width = Align(bit_width); - auto iloc = buf_.size(); - Write(val, byte_width); - stack_.push_back(Value(static_cast(iloc), type, bit_width)); - } - - static BitWidth WidthB(size_t byte_width) { - switch (byte_width) { - case 1: return BIT_WIDTH_8; - case 2: return BIT_WIDTH_16; - case 4: return BIT_WIDTH_32; - case 8: return BIT_WIDTH_64; - default: FLATBUFFERS_ASSERT(false); return BIT_WIDTH_64; - } - } - - template static Type GetScalarType() { - static_assert(flatbuffers::is_scalar::value, "Unrelated types"); - return flatbuffers::is_floating_point::value - ? FBT_FLOAT - : flatbuffers::is_same::value - ? FBT_BOOL - : (flatbuffers::is_unsigned::value ? FBT_UINT - : FBT_INT); - } - - public: - // This was really intended to be private, except for LastValue/ReuseValue. - struct Value { - union { - int64_t i_; - uint64_t u_; - double f_; - }; - - Type type_; - - // For scalars: of itself, for vector: of its elements, for string: length. - BitWidth min_bit_width_; - - Value() : i_(0), type_(FBT_NULL), min_bit_width_(BIT_WIDTH_8) {} - - Value(bool b) - : u_(static_cast(b)), - type_(FBT_BOOL), - min_bit_width_(BIT_WIDTH_8) {} - - Value(int64_t i, Type t, BitWidth bw) - : i_(i), type_(t), min_bit_width_(bw) {} - Value(uint64_t u, Type t, BitWidth bw) - : u_(u), type_(t), min_bit_width_(bw) {} - - Value(float f) : f_(f), type_(FBT_FLOAT), min_bit_width_(BIT_WIDTH_32) {} - Value(double f) : f_(f), type_(FBT_FLOAT), min_bit_width_(WidthF(f)) {} - - uint8_t StoredPackedType(BitWidth parent_bit_width_ = BIT_WIDTH_8) const { - return PackedType(StoredWidth(parent_bit_width_), type_); - } - - BitWidth ElemWidth(size_t buf_size, size_t elem_index) const { - if (IsInline(type_)) { - return min_bit_width_; - } else { - // We have an absolute offset, but want to store a relative offset - // elem_index elements beyond the current buffer end. Since whether - // the relative offset fits in a certain byte_width depends on - // the size of the elements before it (and their alignment), we have - // to test for each size in turn. - for (size_t byte_width = 1; - byte_width <= sizeof(flatbuffers::largest_scalar_t); - byte_width *= 2) { - // Where are we going to write this offset? - auto offset_loc = buf_size + - flatbuffers::PaddingBytes(buf_size, byte_width) + - elem_index * byte_width; - // Compute relative offset. - auto offset = offset_loc - u_; - // Does it fit? 
- auto bit_width = WidthU(offset); - if (static_cast(static_cast(1U) << bit_width) == - byte_width) - return bit_width; - } - FLATBUFFERS_ASSERT(false); // Must match one of the sizes above. - return BIT_WIDTH_64; - } - } - - BitWidth StoredWidth(BitWidth parent_bit_width_ = BIT_WIDTH_8) const { - if (IsInline(type_)) { - return (std::max)(min_bit_width_, parent_bit_width_); - } else { - return min_bit_width_; - } - } - }; - - private: - void WriteAny(const Value &val, uint8_t byte_width) { - switch (val.type_) { - case FBT_NULL: - case FBT_INT: Write(val.i_, byte_width); break; - case FBT_BOOL: - case FBT_UINT: Write(val.u_, byte_width); break; - case FBT_FLOAT: WriteDouble(val.f_, byte_width); break; - default: WriteOffset(val.u_, byte_width); break; - } - } - - size_t CreateBlob(const void *data, size_t len, size_t trailing, Type type) { - auto bit_width = WidthU(len); - auto byte_width = Align(bit_width); - Write(len, byte_width); - auto sloc = buf_.size(); - WriteBytes(data, len + trailing); - stack_.push_back(Value(static_cast(sloc), type, bit_width)); - return sloc; - } - - template - size_t ScalarVector(const T *elems, size_t len, bool fixed) { - auto vector_type = GetScalarType(); - auto byte_width = sizeof(T); - auto bit_width = WidthB(byte_width); - // If you get this CHECK, you're trying to write a vector with a size - // field that is bigger than the scalars you're trying to write (e.g. a - // byte vector > 255 elements). For such types, write a "blob" instead. - // TODO: instead of asserting, could write vector with larger elements - // instead, though that would be wasteful. - FLATBUFFERS_ASSERT(WidthU(len) <= bit_width); - Align(bit_width); - if (!fixed) Write(len, byte_width); - auto vloc = buf_.size(); - for (size_t i = 0; i < len; i++) Write(elems[i], byte_width); - stack_.push_back(Value(static_cast(vloc), - ToTypedVector(vector_type, fixed ? len : 0), - bit_width)); - return vloc; - } - - Value CreateVector(size_t start, size_t vec_len, size_t step, bool typed, - bool fixed, const Value *keys = nullptr) { - FLATBUFFERS_ASSERT( - !fixed || - typed); // typed=false, fixed=true combination is not supported. - // Figure out smallest bit width we can store this vector with. - auto bit_width = (std::max)(force_min_bit_width_, WidthU(vec_len)); - auto prefix_elems = 1; - if (keys) { - // If this vector is part of a map, we will pre-fix an offset to the keys - // to this vector. - bit_width = (std::max)(bit_width, keys->ElemWidth(buf_.size(), 0)); - prefix_elems += 2; - } - Type vector_type = FBT_KEY; - // Check bit widths and types for all elements. - for (size_t i = start; i < stack_.size(); i += step) { - auto elem_width = stack_[i].ElemWidth(buf_.size(), i + prefix_elems); - bit_width = (std::max)(bit_width, elem_width); - if (typed) { - if (i == start) { - vector_type = stack_[i].type_; - } else { - // If you get this CHECK, you are writing a typed vector with - // elements that are not all the same type. - FLATBUFFERS_ASSERT(vector_type == stack_[i].type_); - } - } - } - // If you get this CHECK, your fixed types are not one of: - // Int / UInt / Float / Key. - FLATBUFFERS_ASSERT(!fixed || IsTypedVectorElementType(vector_type)); - auto byte_width = Align(bit_width); - // Write vector. First the keys width/offset if available, and size. - if (keys) { - WriteOffset(keys->u_, byte_width); - Write(1ULL << keys->min_bit_width_, byte_width); - } - if (!fixed) Write(vec_len, byte_width); - // Then the actual data. 
- auto vloc = buf_.size(); - for (size_t i = start; i < stack_.size(); i += step) { - WriteAny(stack_[i], byte_width); - } - // Then the types. - if (!typed) { - for (size_t i = start; i < stack_.size(); i += step) { - buf_.push_back(stack_[i].StoredPackedType(bit_width)); - } - } - return Value(static_cast(vloc), - keys ? FBT_MAP - : (typed ? ToTypedVector(vector_type, fixed ? vec_len : 0) - : FBT_VECTOR), - bit_width); - } - - // You shouldn't really be copying instances of this class. - Builder(const Builder &); - Builder &operator=(const Builder &); - - std::vector buf_; - std::vector stack_; - - bool finished_; - - BuilderFlag flags_; - - BitWidth force_min_bit_width_; - - struct KeyOffsetCompare { - explicit KeyOffsetCompare(const std::vector &buf) : buf_(&buf) {} - bool operator()(size_t a, size_t b) const { - auto stra = - reinterpret_cast(flatbuffers::vector_data(*buf_) + a); - auto strb = - reinterpret_cast(flatbuffers::vector_data(*buf_) + b); - return strcmp(stra, strb) < 0; - } - const std::vector *buf_; - }; - - typedef std::pair StringOffset; - struct StringOffsetCompare { - explicit StringOffsetCompare(const std::vector &buf) - : buf_(&buf) {} - bool operator()(const StringOffset &a, const StringOffset &b) const { - auto stra = reinterpret_cast( - flatbuffers::vector_data(*buf_) + a.first); - auto strb = reinterpret_cast( - flatbuffers::vector_data(*buf_) + b.first); - return strncmp(stra, strb, (std::min)(a.second, b.second) + 1) < 0; - } - const std::vector *buf_; - }; - - typedef std::set KeyOffsetMap; - typedef std::set StringOffsetMap; - - KeyOffsetMap key_pool; - StringOffsetMap string_pool; -}; - -} // namespace flexbuffers - -#if defined(_MSC_VER) -# pragma warning(pop) -#endif - -#endif // FLATBUFFERS_FLEXBUFFERS_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/stl_emulation.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/stl_emulation.h deleted file mode 100644 index 8bae61bf..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/stl_emulation.h +++ /dev/null @@ -1,307 +0,0 @@ -/* - * Copyright 2017 Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef FLATBUFFERS_STL_EMULATION_H_ -#define FLATBUFFERS_STL_EMULATION_H_ - -// clang-format off - -#include -#include -#include -#include -#include - -#if defined(_STLPORT_VERSION) && !defined(FLATBUFFERS_CPP98_STL) - #define FLATBUFFERS_CPP98_STL -#endif // defined(_STLPORT_VERSION) && !defined(FLATBUFFERS_CPP98_STL) - -#if defined(FLATBUFFERS_CPP98_STL) - #include -#endif // defined(FLATBUFFERS_CPP98_STL) - -// Check if we can use template aliases -// Not possible if Microsoft Compiler before 2012 -// Possible is the language feature __cpp_alias_templates is defined well -// Or possible if the C++ std is C+11 or newer -#if (defined(_MSC_VER) && _MSC_VER > 1700 /* MSVC2012 */) \ - || (defined(__cpp_alias_templates) && __cpp_alias_templates >= 200704) \ - || (defined(__cplusplus) && __cplusplus >= 201103L) - #define FLATBUFFERS_TEMPLATES_ALIASES -#endif - -// This header provides backwards compatibility for C++98 STLs like stlport. -namespace flatbuffers { - -// Retrieve ::back() from a string in a way that is compatible with pre C++11 -// STLs (e.g stlport). -inline char& string_back(std::string &value) { - return value[value.length() - 1]; -} - -inline char string_back(const std::string &value) { - return value[value.length() - 1]; -} - -// Helper method that retrieves ::data() from a vector in a way that is -// compatible with pre C++11 STLs (e.g stlport). -template inline T *vector_data(std::vector &vector) { - // In some debug environments, operator[] does bounds checking, so &vector[0] - // can't be used. - return vector.empty() ? nullptr : &vector[0]; -} - -template inline const T *vector_data( - const std::vector &vector) { - return vector.empty() ? nullptr : &vector[0]; -} - -template -inline void vector_emplace_back(std::vector *vector, V &&data) { - #if defined(FLATBUFFERS_CPP98_STL) - vector->push_back(data); - #else - vector->emplace_back(std::forward(data)); - #endif // defined(FLATBUFFERS_CPP98_STL) -} - -#ifndef FLATBUFFERS_CPP98_STL - #if defined(FLATBUFFERS_TEMPLATES_ALIASES) - template - using numeric_limits = std::numeric_limits; - #else - template class numeric_limits : - public std::numeric_limits {}; - #endif // defined(FLATBUFFERS_TEMPLATES_ALIASES) -#else - template class numeric_limits : - public std::numeric_limits { - public: - // Android NDK fix. 
- static T lowest() { - return std::numeric_limits::min(); - } - }; - - template <> class numeric_limits : - public std::numeric_limits { - public: - static float lowest() { return -FLT_MAX; } - }; - - template <> class numeric_limits : - public std::numeric_limits { - public: - static double lowest() { return -DBL_MAX; } - }; - - template <> class numeric_limits { - public: - static unsigned long long min() { return 0ULL; } - static unsigned long long max() { return ~0ULL; } - static unsigned long long lowest() { - return numeric_limits::min(); - } - }; - - template <> class numeric_limits { - public: - static long long min() { - return static_cast(1ULL << ((sizeof(long long) << 3) - 1)); - } - static long long max() { - return static_cast( - (1ULL << ((sizeof(long long) << 3) - 1)) - 1); - } - static long long lowest() { - return numeric_limits::min(); - } - }; -#endif // FLATBUFFERS_CPP98_STL - -#if defined(FLATBUFFERS_TEMPLATES_ALIASES) - #ifndef FLATBUFFERS_CPP98_STL - template using is_scalar = std::is_scalar; - template using is_same = std::is_same; - template using is_floating_point = std::is_floating_point; - template using is_unsigned = std::is_unsigned; - template using is_enum = std::is_enum; - template using make_unsigned = std::make_unsigned; - template - using conditional = std::conditional; - template - using integral_constant = std::integral_constant; - #else - // Map C++ TR1 templates defined by stlport. - template using is_scalar = std::tr1::is_scalar; - template using is_same = std::tr1::is_same; - template using is_floating_point = - std::tr1::is_floating_point; - template using is_unsigned = std::tr1::is_unsigned; - template using is_enum = std::tr1::is_enum; - // Android NDK doesn't have std::make_unsigned or std::tr1::make_unsigned. - template struct make_unsigned { - static_assert(is_unsigned::value, "Specialization not implemented!"); - using type = T; - }; - template<> struct make_unsigned { using type = unsigned char; }; - template<> struct make_unsigned { using type = unsigned short; }; - template<> struct make_unsigned { using type = unsigned int; }; - template<> struct make_unsigned { using type = unsigned long; }; - template<> - struct make_unsigned { using type = unsigned long long; }; - template - using conditional = std::tr1::conditional; - template - using integral_constant = std::tr1::integral_constant; - #endif // !FLATBUFFERS_CPP98_STL -#else - // MSVC 2010 doesn't support C++11 aliases. - template struct is_scalar : public std::is_scalar {}; - template struct is_same : public std::is_same {}; - template struct is_floating_point : - public std::is_floating_point {}; - template struct is_unsigned : public std::is_unsigned {}; - template struct is_enum : public std::is_enum {}; - template struct make_unsigned : public std::make_unsigned {}; - template - struct conditional : public std::conditional {}; - template - struct integral_constant : public std::integral_constant {}; -#endif // defined(FLATBUFFERS_TEMPLATES_ALIASES) - -#ifndef FLATBUFFERS_CPP98_STL - #if defined(FLATBUFFERS_TEMPLATES_ALIASES) - template using unique_ptr = std::unique_ptr; - #else - // MSVC 2010 doesn't support C++11 aliases. - // We're manually "aliasing" the class here as we want to bring unique_ptr - // into the flatbuffers namespace. We have unique_ptr in the flatbuffers - // namespace we have a completely independent implemenation (see below) - // for C++98 STL implementations. 
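A small usage sketch for the portability helpers above (plain standard C++, nothing project-specific): vector_data() avoids taking &v[0] on an empty vector, and flatbuffers::numeric_limits<T>::lowest() works even on C++98 STLs that lack std::numeric_limits<T>::lowest().

    #include "flatbuffers/stl_emulation.h"   // assumed include path
    #include <vector>

    inline void StlEmulationExample() {
      std::vector<int> v;
      int *p = flatbuffers::vector_data(v);       // nullptr, not UB, on an empty vector
      flatbuffers::vector_emplace_back(&v, 7);    // emplace_back, or push_back under C++98
      float lo = flatbuffers::numeric_limits<float>::lowest();   // -FLT_MAX fallback
      (void)p; (void)lo;
    }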
- template class unique_ptr : public std::unique_ptr { - public: - unique_ptr() {} - explicit unique_ptr(T* p) : std::unique_ptr(p) {} - unique_ptr(std::unique_ptr&& u) { *this = std::move(u); } - unique_ptr(unique_ptr&& u) { *this = std::move(u); } - unique_ptr& operator=(std::unique_ptr&& u) { - std::unique_ptr::reset(u.release()); - return *this; - } - unique_ptr& operator=(unique_ptr&& u) { - std::unique_ptr::reset(u.release()); - return *this; - } - unique_ptr& operator=(T* p) { - return std::unique_ptr::operator=(p); - } - }; - #endif // defined(FLATBUFFERS_TEMPLATES_ALIASES) -#else - // Very limited implementation of unique_ptr. - // This is provided simply to allow the C++ code generated from the default - // settings to function in C++98 environments with no modifications. - template class unique_ptr { - public: - typedef T element_type; - - unique_ptr() : ptr_(nullptr) {} - explicit unique_ptr(T* p) : ptr_(p) {} - unique_ptr(unique_ptr&& u) : ptr_(nullptr) { reset(u.release()); } - unique_ptr(const unique_ptr& u) : ptr_(nullptr) { - reset(const_cast(&u)->release()); - } - ~unique_ptr() { reset(); } - - unique_ptr& operator=(const unique_ptr& u) { - reset(const_cast(&u)->release()); - return *this; - } - - unique_ptr& operator=(unique_ptr&& u) { - reset(u.release()); - return *this; - } - - unique_ptr& operator=(T* p) { - reset(p); - return *this; - } - - const T& operator*() const { return *ptr_; } - T* operator->() const { return ptr_; } - T* get() const noexcept { return ptr_; } - explicit operator bool() const { return ptr_ != nullptr; } - - // modifiers - T* release() { - T* value = ptr_; - ptr_ = nullptr; - return value; - } - - void reset(T* p = nullptr) { - T* value = ptr_; - ptr_ = p; - if (value) delete value; - } - - void swap(unique_ptr& u) { - T* temp_ptr = ptr_; - ptr_ = u.ptr_; - u.ptr_ = temp_ptr; - } - - private: - T* ptr_; - }; - - template bool operator==(const unique_ptr& x, - const unique_ptr& y) { - return x.get() == y.get(); - } - - template bool operator==(const unique_ptr& x, - const D* y) { - return static_cast(x.get()) == y; - } - - template bool operator==(const unique_ptr& x, intptr_t y) { - return reinterpret_cast(x.get()) == y; - } - - template bool operator!=(const unique_ptr& x, decltype(nullptr)) { - return !!x; - } - - template bool operator!=(decltype(nullptr), const unique_ptr& x) { - return !!x; - } - - template bool operator==(const unique_ptr& x, decltype(nullptr)) { - return !x; - } - - template bool operator==(decltype(nullptr), const unique_ptr& x) { - return !x; - } - -#endif // !FLATBUFFERS_CPP98_STL - -} // namespace flatbuffers - -#endif // FLATBUFFERS_STL_EMULATION_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/util.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/util.h deleted file mode 100644 index 0c924101..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/flatbuffers/include/flatbuffers/util.h +++ /dev/null @@ -1,683 +0,0 @@ -/* - * Copyright 2014 Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef FLATBUFFERS_UTIL_H_ -#define FLATBUFFERS_UTIL_H_ - -#include - -#include "flatbuffers/base.h" - -#ifndef FLATBUFFERS_PREFER_PRINTF -# include -#else // FLATBUFFERS_PREFER_PRINTF -# include -# include -#endif // FLATBUFFERS_PREFER_PRINTF - -#include -#include - -namespace flatbuffers { - -// @locale-independent functions for ASCII characters set. - -// Fast checking that character lies in closed range: [a <= x <= b] -// using one compare (conditional branch) operator. -inline bool check_ascii_range(char x, char a, char b) { - FLATBUFFERS_ASSERT(a <= b); - // (Hacker's Delight): `a <= x <= b` <=> `(x-a) <={u} (b-a)`. - // The x, a, b will be promoted to int and subtracted without overflow. - return static_cast(x - a) <= static_cast(b - a); -} - -// Case-insensitive isalpha -inline bool is_alpha(char c) { - // ASCII only: alpha to upper case => reset bit 0x20 (~0x20 = 0xDF). - return check_ascii_range(c & 0xDF, 'a' & 0xDF, 'z' & 0xDF); -} - -// Check (case-insensitive) that `c` is equal to alpha. -inline bool is_alpha_char(char c, char alpha) { - FLATBUFFERS_ASSERT(is_alpha(alpha)); - // ASCII only: alpha to upper case => reset bit 0x20 (~0x20 = 0xDF). - return ((c & 0xDF) == (alpha & 0xDF)); -} - -// https://en.cppreference.com/w/cpp/string/byte/isxdigit -// isdigit and isxdigit are the only standard narrow character classification -// functions that are not affected by the currently installed C locale. although -// some implementations (e.g. Microsoft in 1252 codepage) may classify -// additional single-byte characters as digits. -inline bool is_digit(char c) { return check_ascii_range(c, '0', '9'); } - -inline bool is_xdigit(char c) { - // Replace by look-up table. - return is_digit(c) || check_ascii_range(c & 0xDF, 'a' & 0xDF, 'f' & 0xDF); -} - -// Case-insensitive isalnum -inline bool is_alnum(char c) { return is_alpha(c) || is_digit(c); } - -// @end-locale-independent functions for ASCII character set - -#ifdef FLATBUFFERS_PREFER_PRINTF -template size_t IntToDigitCount(T t) { - size_t digit_count = 0; - // Count the sign for negative numbers - if (t < 0) digit_count++; - // Count a single 0 left of the dot for fractional numbers - if (-1 < t && t < 1) digit_count++; - // Count digits until fractional part - T eps = std::numeric_limits::epsilon(); - while (t <= (-1 + eps) || (1 - eps) <= t) { - t /= 10; - digit_count++; - } - return digit_count; -} - -template size_t NumToStringWidth(T t, int precision = 0) { - size_t string_width = IntToDigitCount(t); - // Count the dot for floating point numbers - if (precision) string_width += (precision + 1); - return string_width; -} - -template -std::string NumToStringImplWrapper(T t, const char *fmt, int precision = 0) { - size_t string_width = NumToStringWidth(t, precision); - std::string s(string_width, 0x00); - // Allow snprintf to use std::string trailing null to detect buffer overflow - snprintf(const_cast(s.data()), (s.size() + 1), fmt, string_width, t); - return s; -} -#endif // FLATBUFFERS_PREFER_PRINTF - -// Convert an integer or floating point value to a string. 
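Quick expected results for the locale-independent character tests above (each boils down to a single unsigned range compare):

    inline void AsciiExample() {
      bool d = flatbuffers::is_digit('7');    // true
      bool a = flatbuffers::is_alpha('G');    // true: case folded via (c & 0xDF)
      bool x = flatbuffers::is_xdigit('f');   // true
      bool n = flatbuffers::is_alnum('_');    // false
      (void)d; (void)a; (void)x; (void)n;
    }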
-// In contrast to std::stringstream, "char" values are -// converted to a string of digits, and we don't use scientific notation. -template std::string NumToString(T t) { - // clang-format off - - #ifndef FLATBUFFERS_PREFER_PRINTF - std::stringstream ss; - ss << t; - return ss.str(); - #else // FLATBUFFERS_PREFER_PRINTF - auto v = static_cast(t); - return NumToStringImplWrapper(v, "%.*lld"); - #endif // FLATBUFFERS_PREFER_PRINTF - // clang-format on -} -// Avoid char types used as character data. -template<> inline std::string NumToString(signed char t) { - return NumToString(static_cast(t)); -} -template<> inline std::string NumToString(unsigned char t) { - return NumToString(static_cast(t)); -} -template<> inline std::string NumToString(char t) { - return NumToString(static_cast(t)); -} -#if defined(FLATBUFFERS_CPP98_STL) -template<> inline std::string NumToString(long long t) { - char buf[21]; // (log((1 << 63) - 1) / log(10)) + 2 - snprintf(buf, sizeof(buf), "%lld", t); - return std::string(buf); -} - -template<> -inline std::string NumToString(unsigned long long t) { - char buf[22]; // (log((1 << 63) - 1) / log(10)) + 1 - snprintf(buf, sizeof(buf), "%llu", t); - return std::string(buf); -} -#endif // defined(FLATBUFFERS_CPP98_STL) - -// Special versions for floats/doubles. -template std::string FloatToString(T t, int precision) { - // clang-format off - - #ifndef FLATBUFFERS_PREFER_PRINTF - // to_string() prints different numbers of digits for floats depending on - // platform and isn't available on Android, so we use stringstream - std::stringstream ss; - // Use std::fixed to suppress scientific notation. - ss << std::fixed; - // Default precision is 6, we want that to be higher for doubles. - ss << std::setprecision(precision); - ss << t; - auto s = ss.str(); - #else // FLATBUFFERS_PREFER_PRINTF - auto v = static_cast(t); - auto s = NumToStringImplWrapper(v, "%0.*f", precision); - #endif // FLATBUFFERS_PREFER_PRINTF - // clang-format on - // Sadly, std::fixed turns "1" into "1.00000", so here we undo that. - auto p = s.find_last_not_of('0'); - if (p != std::string::npos) { - // Strip trailing zeroes. If it is a whole number, keep one zero. - s.resize(p + (s[p] == '.' ? 2 : 1)); - } - return s; -} - -template<> inline std::string NumToString(double t) { - return FloatToString(t, 12); -} -template<> inline std::string NumToString(float t) { - return FloatToString(t, 6); -} - -// Convert an integer value to a hexadecimal string. -// The returned string length is always xdigits long, prefixed by 0 digits. -// For example, IntToStringHex(0x23, 8) returns the string "00000023". -inline std::string IntToStringHex(int i, int xdigits) { - FLATBUFFERS_ASSERT(i >= 0); - // clang-format off - - #ifndef FLATBUFFERS_PREFER_PRINTF - std::stringstream ss; - ss << std::setw(xdigits) << std::setfill('0') << std::hex << std::uppercase - << i; - return ss.str(); - #else // FLATBUFFERS_PREFER_PRINTF - return NumToStringImplWrapper(i, "%.*X", xdigits); - #endif // FLATBUFFERS_PREFER_PRINTF - // clang-format on -} - -// clang-format off -// Use locale independent functions {strtod_l, strtof_l, strtoll_l, strtoull_l}. 
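Expected outputs for the converters above, following their own comments (inputs are illustrative):

    inline void NumToStringExample() {
      std::string a = flatbuffers::NumToString(42);          // "42"
      std::string b = flatbuffers::NumToString<char>('A');   // "65": char prints as a number
      std::string c = flatbuffers::FloatToString(1.0, 6);    // "1.0": trailing zeroes stripped
      std::string d = flatbuffers::IntToStringHex(0x23, 8);  // "00000023"
      (void)a; (void)b; (void)c; (void)d;
    }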
-#if defined(FLATBUFFERS_LOCALE_INDEPENDENT) && (FLATBUFFERS_LOCALE_INDEPENDENT > 0) - class ClassicLocale { - #ifdef _MSC_VER - typedef _locale_t locale_type; - #else - typedef locale_t locale_type; // POSIX.1-2008 locale_t type - #endif - ClassicLocale(); - ~ClassicLocale(); - locale_type locale_; - static ClassicLocale instance_; - public: - static locale_type Get() { return instance_.locale_; } - }; - - #ifdef _MSC_VER - #define __strtoull_impl(s, pe, b) _strtoui64_l(s, pe, b, ClassicLocale::Get()) - #define __strtoll_impl(s, pe, b) _strtoi64_l(s, pe, b, ClassicLocale::Get()) - #define __strtod_impl(s, pe) _strtod_l(s, pe, ClassicLocale::Get()) - #define __strtof_impl(s, pe) _strtof_l(s, pe, ClassicLocale::Get()) - #else - #define __strtoull_impl(s, pe, b) strtoull_l(s, pe, b, ClassicLocale::Get()) - #define __strtoll_impl(s, pe, b) strtoll_l(s, pe, b, ClassicLocale::Get()) - #define __strtod_impl(s, pe) strtod_l(s, pe, ClassicLocale::Get()) - #define __strtof_impl(s, pe) strtof_l(s, pe, ClassicLocale::Get()) - #endif -#else - #define __strtod_impl(s, pe) strtod(s, pe) - #define __strtof_impl(s, pe) static_cast(strtod(s, pe)) - #ifdef _MSC_VER - #define __strtoull_impl(s, pe, b) _strtoui64(s, pe, b) - #define __strtoll_impl(s, pe, b) _strtoi64(s, pe, b) - #else - #define __strtoull_impl(s, pe, b) strtoull(s, pe, b) - #define __strtoll_impl(s, pe, b) strtoll(s, pe, b) - #endif -#endif - -inline void strtoval_impl(int64_t *val, const char *str, char **endptr, - int base) { - *val = __strtoll_impl(str, endptr, base); -} - -inline void strtoval_impl(uint64_t *val, const char *str, char **endptr, - int base) { - *val = __strtoull_impl(str, endptr, base); -} - -inline void strtoval_impl(double *val, const char *str, char **endptr) { - *val = __strtod_impl(str, endptr); -} - -// UBSAN: double to float is safe if numeric_limits::is_iec559 is true. -__supress_ubsan__("float-cast-overflow") -inline void strtoval_impl(float *val, const char *str, char **endptr) { - *val = __strtof_impl(str, endptr); -} -#undef __strtoull_impl -#undef __strtoll_impl -#undef __strtod_impl -#undef __strtof_impl -// clang-format on - -// Adaptor for strtoull()/strtoll(). -// Flatbuffers accepts numbers with any count of leading zeros (-009 is -9), -// while strtoll with base=0 interprets first leading zero as octal prefix. -// In future, it is possible to add prefixed 0b0101. -// 1) Checks errno code for overflow condition (out of range). -// 2) If base <= 0, function try to detect base of number by prefix. -// -// Return value (like strtoull and strtoll, but reject partial result): -// - If successful, an integer value corresponding to the str is returned. -// - If full string conversion can't be performed, 0 is returned. -// - If the converted value falls out of range of corresponding return type, a -// range error occurs. In this case value MAX(T)/MIN(T) is returned. 
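A behaviour sketch matching the contract documented above for the string-to-number adaptors defined just below (inputs are arbitrary): only full-string conversions succeed, leading zeros are accepted, and out-of-range values clamp to the target type's MAX/MIN.

    inline void StringToNumberExample() {
      int64_t i64 = 0;
      uint8_t u8  = 0;
      flatbuffers::StringToNumber("-009", &i64);   // true:  i64 == -9
      flatbuffers::StringToNumber("300",  &u8);    // false: u8  == 255 (clamped to MAX)
      flatbuffers::StringToNumber("12x",  &i64);   // false: i64 == 0 (partial parse rejected)
    }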
-template -inline bool StringToIntegerImpl(T *val, const char *const str, - const int base = 0, - const bool check_errno = true) { - // T is int64_t or uint64_T - FLATBUFFERS_ASSERT(str); - if (base <= 0) { - auto s = str; - while (*s && !is_digit(*s)) s++; - if (s[0] == '0' && is_alpha_char(s[1], 'X')) - return StringToIntegerImpl(val, str, 16, check_errno); - // if a prefix not match, try base=10 - return StringToIntegerImpl(val, str, 10, check_errno); - } else { - if (check_errno) errno = 0; // clear thread-local errno - auto endptr = str; - strtoval_impl(val, str, const_cast(&endptr), base); - if ((*endptr != '\0') || (endptr == str)) { - *val = 0; // erase partial result - return false; // invalid string - } - // errno is out-of-range, return MAX/MIN - if (check_errno && errno) return false; - return true; - } -} - -template -inline bool StringToFloatImpl(T *val, const char *const str) { - // Type T must be either float or double. - FLATBUFFERS_ASSERT(str && val); - auto end = str; - strtoval_impl(val, str, const_cast(&end)); - auto done = (end != str) && (*end == '\0'); - if (!done) *val = 0; // erase partial result - return done; -} - -// Convert a string to an instance of T. -// Return value (matched with StringToInteger64Impl and strtod): -// - If successful, a numeric value corresponding to the str is returned. -// - If full string conversion can't be performed, 0 is returned. -// - If the converted value falls out of range of corresponding return type, a -// range error occurs. In this case value MAX(T)/MIN(T) is returned. -template inline bool StringToNumber(const char *s, T *val) { - FLATBUFFERS_ASSERT(s && val); - int64_t i64; - // The errno check isn't needed, will return MAX/MIN on overflow. - if (StringToIntegerImpl(&i64, s, 0, false)) { - const int64_t max = (flatbuffers::numeric_limits::max)(); - const int64_t min = flatbuffers::numeric_limits::lowest(); - if (i64 > max) { - *val = static_cast(max); - return false; - } - if (i64 < min) { - // For unsigned types return max to distinguish from - // "no conversion can be performed" when 0 is returned. - *val = static_cast(flatbuffers::is_unsigned::value ? max : min); - return false; - } - *val = static_cast(i64); - return true; - } - *val = 0; - return false; -} - -template<> inline bool StringToNumber(const char *str, int64_t *val) { - return StringToIntegerImpl(val, str); -} - -template<> -inline bool StringToNumber(const char *str, uint64_t *val) { - if (!StringToIntegerImpl(val, str)) return false; - // The strtoull accepts negative numbers: - // If the minus sign was part of the input sequence, the numeric value - // calculated from the sequence of digits is negated as if by unary minus - // in the result type, which applies unsigned integer wraparound rules. - // Fix this behaviour (except -0). - if (*val) { - auto s = str; - while (*s && !is_digit(*s)) s++; - s = (s > str) ? (s - 1) : s; // step back to one symbol - if (*s == '-') { - // For unsigned types return the max to distinguish from - // "no conversion can be performed". - *val = (flatbuffers::numeric_limits::max)(); - return false; - } - } - return true; -} - -template<> inline bool StringToNumber(const char *s, float *val) { - return StringToFloatImpl(val, s); -} - -template<> inline bool StringToNumber(const char *s, double *val) { - return StringToFloatImpl(val, s); -} - -inline int64_t StringToInt(const char *s, int base = 10) { - int64_t val; - return StringToIntegerImpl(&val, s, base) ? 
val : 0; -} - -inline uint64_t StringToUInt(const char *s, int base = 10) { - uint64_t val; - return StringToIntegerImpl(&val, s, base) ? val : 0; -} - -typedef bool (*LoadFileFunction)(const char *filename, bool binary, - std::string *dest); -typedef bool (*FileExistsFunction)(const char *filename); - -LoadFileFunction SetLoadFileFunction(LoadFileFunction load_file_function); - -FileExistsFunction SetFileExistsFunction( - FileExistsFunction file_exists_function); - -// Check if file "name" exists. -bool FileExists(const char *name); - -// Check if "name" exists and it is also a directory. -bool DirExists(const char *name); - -// Load file "name" into "buf" returning true if successful -// false otherwise. If "binary" is false data is read -// using ifstream's text mode, otherwise data is read with -// no transcoding. -bool LoadFile(const char *name, bool binary, std::string *buf); - -// Save data "buf" of length "len" bytes into a file -// "name" returning true if successful, false otherwise. -// If "binary" is false data is written using ifstream's -// text mode, otherwise data is written with no -// transcoding. -bool SaveFile(const char *name, const char *buf, size_t len, bool binary); - -// Save data "buf" into file "name" returning true if -// successful, false otherwise. If "binary" is false -// data is written using ifstream's text mode, otherwise -// data is written with no transcoding. -inline bool SaveFile(const char *name, const std::string &buf, bool binary) { - return SaveFile(name, buf.c_str(), buf.size(), binary); -} - -// Functionality for minimalistic portable path handling. - -// The functions below behave correctly regardless of whether posix ('/') or -// Windows ('/' or '\\') separators are used. - -// Any new separators inserted are always posix. -FLATBUFFERS_CONSTEXPR char kPathSeparator = '/'; - -// Returns the path with the extension, if any, removed. -std::string StripExtension(const std::string &filepath); - -// Returns the extension, if any. -std::string GetExtension(const std::string &filepath); - -// Return the last component of the path, after the last separator. -std::string StripPath(const std::string &filepath); - -// Strip the last component of the path + separator. -std::string StripFileName(const std::string &filepath); - -// Concatenates a path with a filename, regardless of wether the path -// ends in a separator or not. -std::string ConCatPathFileName(const std::string &path, - const std::string &filename); - -// Replaces any '\\' separators with '/' -std::string PosixPath(const char *path); - -// This function ensure a directory exists, by recursively -// creating dirs for any parts of the path that don't exist yet. -void EnsureDirExists(const std::string &filepath); - -// Obtains the absolute path from any other path. -// Returns the input path if the absolute path couldn't be resolved. -std::string AbsolutePath(const std::string &filepath); - -// To and from UTF-8 unicode conversion functions - -// Convert a unicode code point into a UTF-8 representation by appending it -// to a string. Returns the number of bytes generated. -inline int ToUTF8(uint32_t ucc, std::string *out) { - FLATBUFFERS_ASSERT(!(ucc & 0x80000000)); // Top bit can't be set. - // 6 possible encodings: http://en.wikipedia.org/wiki/UTF-8 - for (int i = 0; i < 6; i++) { - // Max bits this encoding can represent. - uint32_t max_bits = 6 + i * 5 + static_cast(!i); - if (ucc < (1u << max_bits)) { // does it fit? 
- // Remaining bits not encoded in the first byte, store 6 bits each - uint32_t remain_bits = i * 6; - // Store first byte: - (*out) += static_cast((0xFE << (max_bits - remain_bits)) | - (ucc >> remain_bits)); - // Store remaining bytes: - for (int j = i - 1; j >= 0; j--) { - (*out) += static_cast(((ucc >> (j * 6)) & 0x3F) | 0x80); - } - return i + 1; // Return the number of bytes added. - } - } - FLATBUFFERS_ASSERT(0); // Impossible to arrive here. - return -1; -} - -// Converts whatever prefix of the incoming string corresponds to a valid -// UTF-8 sequence into a unicode code. The incoming pointer will have been -// advanced past all bytes parsed. -// returns -1 upon corrupt UTF-8 encoding (ignore the incoming pointer in -// this case). -inline int FromUTF8(const char **in) { - int len = 0; - // Count leading 1 bits. - for (int mask = 0x80; mask >= 0x04; mask >>= 1) { - if (**in & mask) { - len++; - } else { - break; - } - } - if ((static_cast(**in) << len) & 0x80) - return -1; // Bit after leading 1's must be 0. - if (!len) return *(*in)++; - // UTF-8 encoded values with a length are between 2 and 4 bytes. - if (len < 2 || len > 4) { return -1; } - // Grab initial bits of the code. - int ucc = *(*in)++ & ((1 << (7 - len)) - 1); - for (int i = 0; i < len - 1; i++) { - if ((**in & 0xC0) != 0x80) return -1; // Upper bits must 1 0. - ucc <<= 6; - ucc |= *(*in)++ & 0x3F; // Grab 6 more bits of the code. - } - // UTF-8 cannot encode values between 0xD800 and 0xDFFF (reserved for - // UTF-16 surrogate pairs). - if (ucc >= 0xD800 && ucc <= 0xDFFF) { return -1; } - // UTF-8 must represent code points in their shortest possible encoding. - switch (len) { - case 2: - // Two bytes of UTF-8 can represent code points from U+0080 to U+07FF. - if (ucc < 0x0080 || ucc > 0x07FF) { return -1; } - break; - case 3: - // Three bytes of UTF-8 can represent code points from U+0800 to U+FFFF. - if (ucc < 0x0800 || ucc > 0xFFFF) { return -1; } - break; - case 4: - // Four bytes of UTF-8 can represent code points from U+10000 to U+10FFFF. - if (ucc < 0x10000 || ucc > 0x10FFFF) { return -1; } - break; - } - return ucc; -} - -#ifndef FLATBUFFERS_PREFER_PRINTF -// Wraps a string to a maximum length, inserting new lines where necessary. Any -// existing whitespace will be collapsed down to a single space. A prefix or -// suffix can be provided, which will be inserted before or after a wrapped -// line, respectively. 
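A round-trip sketch for the UTF-8 helpers above (the code point is arbitrary): ToUTF8() appends the shortest encoding and returns its byte count; FromUTF8() decodes it back while advancing the pointer, and returns -1 for overlong forms or surrogate code points.

    inline void Utf8Example() {
      std::string s;
      int n = flatbuffers::ToUTF8(0x20AC, &s);   // U+20AC -> 3 bytes, n == 3
      const char *p = s.c_str();
      int cp = flatbuffers::FromUTF8(&p);        // cp == 0x20AC, p advanced by 3 bytes
      (void)n; (void)cp;
    }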
-inline std::string WordWrap(const std::string in, size_t max_length, - const std::string wrapped_line_prefix, - const std::string wrapped_line_suffix) { - std::istringstream in_stream(in); - std::string wrapped, line, word; - - in_stream >> word; - line = word; - - while (in_stream >> word) { - if ((line.length() + 1 + word.length() + wrapped_line_suffix.length()) < - max_length) { - line += " " + word; - } else { - wrapped += line + wrapped_line_suffix + "\n"; - line = wrapped_line_prefix + word; - } - } - wrapped += line; - - return wrapped; -} -#endif // !FLATBUFFERS_PREFER_PRINTF - -inline bool EscapeString(const char *s, size_t length, std::string *_text, - bool allow_non_utf8, bool natural_utf8) { - std::string &text = *_text; - text += "\""; - for (uoffset_t i = 0; i < length; i++) { - char c = s[i]; - switch (c) { - case '\n': text += "\\n"; break; - case '\t': text += "\\t"; break; - case '\r': text += "\\r"; break; - case '\b': text += "\\b"; break; - case '\f': text += "\\f"; break; - case '\"': text += "\\\""; break; - case '\\': text += "\\\\"; break; - default: - if (c >= ' ' && c <= '~') { - text += c; - } else { - // Not printable ASCII data. Let's see if it's valid UTF-8 first: - const char *utf8 = s + i; - int ucc = FromUTF8(&utf8); - if (ucc < 0) { - if (allow_non_utf8) { - text += "\\x"; - text += IntToStringHex(static_cast(c), 2); - } else { - // There are two cases here: - // - // 1) We reached here by parsing an IDL file. In that case, - // we previously checked for non-UTF-8, so we shouldn't reach - // here. - // - // 2) We reached here by someone calling GenerateText() - // on a previously-serialized flatbuffer. The data might have - // non-UTF-8 Strings, or might be corrupt. - // - // In both cases, we have to give up and inform the caller - // they have no JSON. - return false; - } - } else { - if (natural_utf8) { - // utf8 points to past all utf-8 bytes parsed - text.append(s + i, static_cast(utf8 - s - i)); - } else if (ucc <= 0xFFFF) { - // Parses as Unicode within JSON's \uXXXX range, so use that. - text += "\\u"; - text += IntToStringHex(ucc, 4); - } else if (ucc <= 0x10FFFF) { - // Encode Unicode SMP values to a surrogate pair using two \u - // escapes. - uint32_t base = ucc - 0x10000; - auto high_surrogate = (base >> 10) + 0xD800; - auto low_surrogate = (base & 0x03FF) + 0xDC00; - text += "\\u"; - text += IntToStringHex(high_surrogate, 4); - text += "\\u"; - text += IntToStringHex(low_surrogate, 4); - } - // Skip past characters recognized. - i = static_cast(utf8 - s - 1); - } - } - break; - } - } - text += "\""; - return true; -} - -inline std::string BufferToHexText(const void *buffer, size_t buffer_size, - size_t max_length, - const std::string &wrapped_line_prefix, - const std::string &wrapped_line_suffix) { - std::string text = wrapped_line_prefix; - size_t start_offset = 0; - const char *s = reinterpret_cast(buffer); - for (size_t i = 0; s && i < buffer_size; i++) { - // Last iteration or do we have more? - bool have_more = i + 1 < buffer_size; - text += "0x"; - text += IntToStringHex(static_cast(s[i]), 2); - if (have_more) { text += ','; } - // If we have more to process and we reached max_length - if (have_more && - text.size() + wrapped_line_suffix.size() >= start_offset + max_length) { - text += wrapped_line_suffix; - text += '\n'; - start_offset = text.size(); - text += wrapped_line_prefix; - } - } - text += wrapped_line_suffix; - return text; -} - -// Remove paired quotes in a string: "text"|'text' -> text. 
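A sketch of EscapeString() on a short mixed input (the literal is arbitrary): control characters get C-style escapes, and with natural_utf8 == false a valid UTF-8 sequence is re-emitted as a \uXXXX escape.

    inline void EscapeExample() {
      std::string out;
      bool ok = flatbuffers::EscapeString("a\tb\xE2\x82\xAC", 6, &out,
                                          /*allow_non_utf8=*/false,
                                          /*natural_utf8=*/false);
      // ok == true, out == "\"a\\tb\\u20AC\""
      (void)ok;
    }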
-std::string RemoveStringQuotes(const std::string &s); - -// Change th global C-locale to locale with name . -// Returns an actual locale name in <_value>, useful if locale_name is "" or -// null. -bool SetGlobalTestLocale(const char *locale_name, - std::string *_value = nullptr); - -// Read (or test) a value of environment variable. -bool ReadEnvironmentVariable(const char *var_name, - std::string *_value = nullptr); - -// MSVC specific: Send all CHECK reports to STDOUT to prevent CI hangs. -void SetupDefaultCRTReportMode(); - -} // namespace flatbuffers - -#endif // FLATBUFFERS_UTIL_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/LICENSE b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/fixedpoint/fixedpoint.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/fixedpoint/fixedpoint.h deleted file mode 100644 index 9b7d6e14..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/fixedpoint/fixedpoint.h +++ /dev/null @@ -1,900 +0,0 @@ -// Copyright 2015 The Gemmlowp Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// fixedpoint.h: fixed-point arithmetic, with basic operations and -// a few math functions such as tanh. - -#ifndef GEMMLOWP_INTERNAL_FIXEDPOINT_H_ -#define GEMMLOWP_INTERNAL_FIXEDPOINT_H_ - -#include -#include -#include -#include -#include - -#include "../internal/detect_platform.h" - -namespace gemmlowp { - -// Part 1: Low-level integer-arithmetic primitives. -// The implementations here are generic implementations valid for -// scalar types (e.g. std::int32_t). Architecture-specific SIMD types -// (e.g. NEON int32x4_t) may be supported by providing -// specializations for them in separate files. -// -// The purpose of these primitives is two-fold: -// - They will be used to implement higher-level fixed-point -// abstractions, namely the FixedPoint class and its arithmetic -// operators. -// - They will be directly used to implement some more involved -// fixed-point computations, e.g. the fixed-point implementation -// of math functions such as tanh. - -// Some compile-time traits around raw types to handle SIMD aspects: -// number of lanes, underlying scalar type. 
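The preamble of the deleted fixedpoint.h describes scalar-generic primitives whose SIMD specializations live in separate headers, selected through a small traits class (element type, lane count). Below is a minimal standalone sketch of that traits-and-overload pattern, assuming only standard C++; the names mirror the deleted header, but the code is illustrative rather than the original source.

```cpp
#include <cassert>
#include <cstdint>

// Minimal sketch of the traits pattern: generic code asks the traits class
// for the scalar element type and lane count of a "raw" register type.
template <typename tRawType>
struct FixedPointRawTypeTraits {};

template <>
struct FixedPointRawTypeTraits<std::int32_t> {
  typedef std::int32_t ScalarRawType;
  static constexpr int kLanes = 1;  // a plain scalar is a 1-lane "vector"
};

// A generic primitive, valid for any scalar integer type. A SIMD build would
// add an overload for its vector type (e.g. int32x4_t) in a separate header.
template <typename tIntegerType>
tIntegerType BitAnd(tIntegerType a, tIntegerType b) {
  return a & b;
}

// Broadcast a scalar across all lanes; for the scalar case this is identity.
template <typename tRawType>
tRawType Dup(typename FixedPointRawTypeTraits<tRawType>::ScalarRawType x) {
  return x;
}

int main() {
  static_assert(FixedPointRawTypeTraits<std::int32_t>::kLanes == 1, "");
  assert(BitAnd<std::int32_t>(0x0ff0, 0x00ff) == 0x00f0);
  assert(Dup<std::int32_t>(7) == 7);
  return 0;
}
```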
-template -struct FixedPointRawTypeTraits {}; - -template <> -struct FixedPointRawTypeTraits { - typedef std::int32_t ScalarRawType; - static constexpr int kLanes = 1; -}; - -template <> -struct FixedPointRawTypeTraits { - typedef std::int16_t ScalarRawType; - static constexpr int kLanes = 1; -}; - -// Returns a SIMD value duplicating a scalar value across all lanes. -template -tRawType Dup(typename FixedPointRawTypeTraits::ScalarRawType x) { - return x; -} - -// Plain bit-wise AND -template -tIntegerType BitAnd(tIntegerType a, tIntegerType b) { - return a & b; -} - -// Plain bit-wise OR -template -tIntegerType BitOr(tIntegerType a, tIntegerType b) { - return a | b; -} - -// Plain bit-wise XOR -template -tIntegerType BitXor(tIntegerType a, tIntegerType b) { - return a ^ b; -} - -// Plain bit-wise NOT -template -tIntegerType BitNot(tIntegerType a) { - return ~a; -} - -// Integer addition. Not saturating. Overflow is undefined behavior. -template -tIntegerType Add(tIntegerType a, tIntegerType b) { - return a + b; -} - -// Integer subtraction. Not saturating. Overflow is undefined behavior. -template -tIntegerType Mul(tIntegerType a, tIntegerType b) { - return a * b; -} - -template -tIntegerType Sub(tIntegerType a, tIntegerType b) { - return a - b; -} - -// Integer unary negative. Not saturating. Overflow is undefined behavior. -template -tIntegerType Neg(tIntegerType a) { - return -a; -} - -// Integer arithmetic left-shift, equivalent to multiplying with a power of two. -// Negative values are OK. In case of overflow, no Undefined -// Behavior, but the results are implementation-defined (in practice, -// they currently are saturated, but we make no commitment to that). The idea -// is that the caller will want to implement the overflowing cases with -// saturation with compare-and-mask, so we don't care about the results -// in the overflow case, we just want to avoid undefined behavior. -// -// tIntegerType may be int32 or any narrower signed type. -template -tIntegerType ShiftLeft(tIntegerType a, int offset) { - const std::int64_t wide_a = static_cast(a); - const std::int64_t wide_shifted = wide_a * (1 << offset); - const auto min = std::numeric_limits::min(); - const auto max = std::numeric_limits::max(); - return wide_shifted < min - ? min - : wide_shifted > max ? max - : static_cast(wide_shifted); -} - -// Integer arithmetic right-shift. Not rounding. -// Relying on implementation-defined, but in-practice-consistent, -// C++ compiler behavior. -template -tIntegerType ShiftRight(tIntegerType a, int offset) { - return a >> offset; -} - -// Each bit of the result is set to the corresponding bit of either then_val or -// else_val depending on whether the corresponding bit of if_mask is set. -// Equivalent to the VBSL instruction in ARM NEON. -template -tIntegerType SelectUsingMask(tIntegerType if_mask, tIntegerType then_val, - tIntegerType else_val) { - return BitXor(BitAnd(if_mask, then_val), BitAnd(BitNot(if_mask), else_val)); -} - -// For each input scalar, the corresponding bits of the result are set if the -// input scalar is non-zero. -template -tIntegerType MaskIfNonZero(tIntegerType a) { - static constexpr tIntegerType zero = 0; - return a ? BitNot(zero) : zero; -} - -// For each input scalar, the corresponding bits of the result are set if the -// input scalar is zero. -template -tIntegerType MaskIfZero(tIntegerType a) { - return MaskIfNonZero(!a); -} - -// For each pair of input scalars, the corresponding bits of the result are -// set if the input scalars are equal. 
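The ShiftLeft comment above explains that overflow is handled by widening to 64 bits and clamping rather than invoking undefined behavior. A standalone restatement of that idea for int32, with spot checks of the in-range and saturating cases; this is a sketch of the technique, not the deleted code.

```cpp
#include <cassert>
#include <cstdint>
#include <limits>

// Saturating arithmetic left shift: widen to 64 bits (multiplying by 2^offset
// to avoid shifting a negative value), then clamp to the 32-bit range instead
// of letting the shift overflow.
std::int32_t SaturatingShiftLeft(std::int32_t a, int offset) {
  const std::int64_t wide =
      static_cast<std::int64_t>(a) * (std::int64_t(1) << offset);
  const std::int64_t lo = std::numeric_limits<std::int32_t>::min();
  const std::int64_t hi = std::numeric_limits<std::int32_t>::max();
  if (wide < lo) return static_cast<std::int32_t>(lo);
  if (wide > hi) return static_cast<std::int32_t>(hi);
  return static_cast<std::int32_t>(wide);
}

int main() {
  assert(SaturatingShiftLeft(3, 4) == 48);  // in range: exact result
  assert(SaturatingShiftLeft(1 << 30, 2) ==
         std::numeric_limits<std::int32_t>::max());   // clamps high
  assert(SaturatingShiftLeft(-(1 << 30), 2) ==
         std::numeric_limits<std::int32_t>::min());   // clamps low
  return 0;
}
```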
-template -tIntegerType MaskIfEqual(tIntegerType a, tIntegerType b) { - return MaskIfNonZero(a == b); -} - -// For each pair of input scalars, the corresponding bits of the result are -// set if the input scalars are not equal. -template -tIntegerType MaskIfNotEqual(tIntegerType a, tIntegerType b) { - return MaskIfNonZero(a != b); -} - -// For each pair of input scalars, the corresponding bits of the result are -// set if the input scalars a, b satisfy a > b. -template -tIntegerType MaskIfGreaterThan(tIntegerType a, tIntegerType b) { - return MaskIfNonZero(a > b); -} - -// For each pair of input scalars, the corresponding bits of the result are -// set if the input scalars a, b satisfy a >= b. -template -tIntegerType MaskIfGreaterThanOrEqual(tIntegerType a, tIntegerType b) { - return MaskIfNonZero(a >= b); -} - -// For each pair of input scalars, the corresponding bits of the result are -// set if the input scalars a, b satisfy a < b. -template -tIntegerType MaskIfLessThan(tIntegerType a, tIntegerType b) { - return MaskIfNonZero(a < b); -} - -// For each pair of input scalars, the corresponding bits of the result are -// set if the input scalars a, b satisfy a <= b. -template -tIntegerType MaskIfLessThanOrEqual(tIntegerType a, tIntegerType b) { - return MaskIfNonZero(a <= b); -} - -// Returns true if all of the input scalars are nonzero. -// This function may currently assume that each of the input scalars has either -// all or none of its bits set. Otherwise, its behavior is currently undefined. -template -bool All(tIntegerType a) { - return a; -} - -// Returns true if any of the input scalars are nonzero. -// This function may currently assume that each of the input scalars has either -// all or none of its bits set. Otherwise, its behavior is currently undefined. -template -bool Any(tIntegerType a) { - return a; -} - -// Returns (a+b)/2, rounded to the nearest integer. -// Equivalent to VRHADD in the ARM NEON instruction set. -template -IntegerType RoundingHalfSum(IntegerType a, IntegerType b) { - static_assert(std::is_same::value, "unimplemented"); - (void)b; - return a; -} - -template <> -inline std::int32_t RoundingHalfSum(std::int32_t a, std::int32_t b) { - std::int64_t a64 = a; - std::int64_t b64 = b; - std::int64_t sum = a64 + b64; - std::int64_t sign = sum >= 0 ? 1 : -1; - return static_cast((sum + sign) / 2); -} - -template <> -inline std::int16_t RoundingHalfSum(std::int16_t a, std::int16_t b) { - std::int32_t a32 = a; - std::int32_t b32 = b; - std::int32_t sum = a32 + b32; - std::int32_t sign = sum >= 0 ? 1 : -1; - return static_cast((sum + sign) / 2); -} - -template -IntegerType SaturatingAdd(IntegerType a, IntegerType b) { - static_assert(std::is_same::value, "unimplemented"); - (void)b; - return a; -} - -// So far this is only needed for int16. -template <> -inline std::int16_t SaturatingAdd(std::int16_t a, std::int16_t b) { - std::int32_t a32 = a; - std::int32_t b32 = b; - std::int32_t sum = a32 + b32; - return static_cast( - std::min(static_cast(32767), - std::max(static_cast(-32768), sum))); -} - -// Returns a+b, saturating if the integers are 16bit or narrower, -// otherwise just a plain addition. 
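All of the comparison primitives above return all-ones/all-zeros masks precisely so that lane-wise selection can be written without branches, which is what makes the same code shape work for both scalars and SIMD vectors. A scalar sketch of that idiom (minimal re-statements with illustrative names, not the deleted header), used here to build a branchless min:

```cpp
#include <cassert>
#include <cstdint>

// Masks are all-ones for "true" and all-zeros for "false".
std::int32_t MaskIfLessThan(std::int32_t a, std::int32_t b) {
  return (a < b) ? ~std::int32_t(0) : std::int32_t(0);
}

// Bitwise blend: picks then_val where mask bits are set, else_val elsewhere.
std::int32_t SelectUsingMask(std::int32_t if_mask, std::int32_t then_val,
                             std::int32_t else_val) {
  return (if_mask & then_val) ^ (~if_mask & else_val);
}

// Branchless min built from the two primitives above.
std::int32_t BranchlessMin(std::int32_t a, std::int32_t b) {
  return SelectUsingMask(MaskIfLessThan(a, b), a, b);
}

int main() {
  assert(BranchlessMin(3, 9) == 3);
  assert(BranchlessMin(-4, -7) == -7);
  assert(BranchlessMin(5, 5) == 5);
  return 0;
}
```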
-template -struct AddSaturatingIf16BitImpl { - static IntegerType Run(IntegerType a, IntegerType b) { return Add(a, b); } -}; -template -struct AddSaturatingIf16BitImpl { - static IntegerType Run(IntegerType a, IntegerType b) { - return SaturatingAdd(a, b); - } -}; -template -IntegerType AddSaturatingIf16Bit(IntegerType a, IntegerType b) { - using ScalarType = - typename FixedPointRawTypeTraits::ScalarRawType; - return AddSaturatingIf16BitImpl::Run(a, - b); -} - -// Returns the integer that represents the product of two fixed-point -// numbers, interpreting all integers as fixed-point values in the -// interval [-1, 1), rounding to the nearest value, and saturating -// -1 * -1 to the maximum value (since 1 is not in the half-open -// interval [-1, 1)). -// -// [The explanation below specializes to std::int32_t for example purpose.] -// -// The mapping between IntegerType and the interval [-1, 1) is unique and -// implied by IntegerType, which is assumed to be signed. For example, -// for IntegerType==std::int32_t, the mapping is -// real_value = integer_value / 2^31. -// So in this case, and leaving aside rounding and saturating, this -// function computes ((a / 2^31) * (b / 2^31)) * 2^31, which simplifies to -// (a * b) / 2^31. -// -// The 'doubling' part in the name of this function comes from the fact that -// this operation is very close to a "multiply-high" operation, keeping only -// the top half bits, except that that would be effectively computing -// (a * b) / 2^32, -// so here we are computing 2x that, since -// 1/2^31 = 2 * 1/2^32. -// The idea is to use all of the available 32 bits in the destination int32 -// value. -// -// [End of the explanation specializing to int32.] -// -// This is equivalent to the VQRDMULH instruction in ARM NEON. -template -IntegerType SaturatingRoundingDoublingHighMul(IntegerType a, IntegerType b) { - static_assert(std::is_same::value, "unimplemented"); - (void)b; - return a; -} - -// This function implements the same computation as the ARMv7 NEON VQRDMULH -// instruction. -template <> -inline std::int32_t SaturatingRoundingDoublingHighMul(std::int32_t a, - std::int32_t b) { - bool overflow = a == b && a == std::numeric_limits::min(); - std::int64_t a_64(a); - std::int64_t b_64(b); - std::int64_t ab_64 = a_64 * b_64; - std::int32_t nudge = ab_64 >= 0 ? (1 << 30) : (1 - (1 << 30)); - std::int32_t ab_x2_high32 = - static_cast((ab_64 + nudge) / (1ll << 31)); - return overflow ? std::numeric_limits::max() : ab_x2_high32; -} - -template <> -inline std::int16_t SaturatingRoundingDoublingHighMul(std::int16_t a, - std::int16_t b) { - bool overflow = a == b && a == std::numeric_limits::min(); - std::int32_t a_32(a); - std::int32_t b_32(b); - std::int32_t ab_32 = a_32 * b_32; - std::int16_t nudge = ab_32 >= 0 ? (1 << 14) : (1 - (1 << 14)); - std::int16_t ab_x2_high16 = - static_cast((ab_32 + nudge) / (1 << 15)); - return overflow ? std::numeric_limits::max() : ab_x2_high16; -} - -// Correctly-rounded-to-nearest division by a power-of-two. -// Also known as a rounding arithmetic right shift. 
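The int32 specialization above is the scalar Q0.31 "saturating rounding doubling high multiply" (the VQRDMULH-equivalent). Below is a standalone copy of that arithmetic with numeric spot checks for the normal case and the single saturating case; the shortened name SRDHM is just for this sketch.

```cpp
#include <cassert>
#include <cstdint>
#include <limits>

// Interprets both operands as Q0.31 fixed-point values in [-1, 1) and returns
// the rounded product, saturating only for -1 * -1.
std::int32_t SRDHM(std::int32_t a, std::int32_t b) {
  const bool overflow =
      a == b && a == std::numeric_limits<std::int32_t>::min();
  const std::int64_t ab =
      static_cast<std::int64_t>(a) * static_cast<std::int64_t>(b);
  const std::int32_t nudge = ab >= 0 ? (1 << 30) : (1 - (1 << 30));
  const std::int32_t high = static_cast<std::int32_t>((ab + nudge) / (1ll << 31));
  return overflow ? std::numeric_limits<std::int32_t>::max() : high;
}

int main() {
  const std::int32_t half = 1 << 30;     // 0.5 in Q0.31
  const std::int32_t quarter = 1 << 29;  // 0.25 in Q0.31
  assert(SRDHM(half, half) == quarter);  // 0.5 * 0.5 == 0.25
  // -1 * -1 is the only saturating case: 1 is not representable in [-1, 1).
  const std::int32_t minus_one = std::numeric_limits<std::int32_t>::min();
  assert(SRDHM(minus_one, minus_one) ==
         std::numeric_limits<std::int32_t>::max());
  return 0;
}
```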
-template -inline IntegerType RoundingDivideByPOT(IntegerType x, int exponent) { - assert(exponent >= 0); - assert(exponent <= 31); - const IntegerType mask = Dup((1ll << exponent) - 1); - const IntegerType zero = Dup(0); - const IntegerType one = Dup(1); - const IntegerType remainder = BitAnd(x, mask); - const IntegerType threshold = - Add(ShiftRight(mask, 1), BitAnd(MaskIfLessThan(x, zero), one)); - return Add(ShiftRight(x, exponent), - BitAnd(MaskIfGreaterThan(remainder, threshold), one)); -} - -// Returns the product of a run-time integer value by a compile-time power -// of two, with either a positive exponent (equivalent to an arithmetic -// left shift, saturating) or a negative exponent (equivalent to an arithmetic -// right shift, rounding to nearest). -template 0 ? 1 : Exponent < 0 ? -1 : 0)> -struct ImplSaturatingRoundingMultiplyByPOT {}; - -template -struct ImplSaturatingRoundingMultiplyByPOT { - static IntegerType eval(IntegerType x) { return x; } -}; - -template -struct ImplSaturatingRoundingMultiplyByPOT { - static IntegerType eval(IntegerType x) { - using ScalarIntegerType = - typename FixedPointRawTypeTraits::ScalarRawType; - const IntegerType min = - Dup(std::numeric_limits::min()); - const IntegerType max = - Dup(std::numeric_limits::max()); - const int ScalarIntegerTypeBits = 8 * sizeof(ScalarIntegerType); - - const std::int32_t threshold = - ((1 << (ScalarIntegerTypeBits - 1 - Exponent)) - 1); - const IntegerType positive_mask = - MaskIfGreaterThan(x, Dup(threshold)); - const IntegerType negative_mask = - MaskIfLessThan(x, Dup(-threshold)); - - IntegerType result = ShiftLeft(x, Exponent); - result = SelectUsingMask(positive_mask, max, result); - result = SelectUsingMask(negative_mask, min, result); - return result; - } -}; - -template -struct ImplSaturatingRoundingMultiplyByPOT { - static IntegerType eval(IntegerType x) { - return RoundingDivideByPOT(x, -Exponent); - } -}; - -template -IntegerType SaturatingRoundingMultiplyByPOT(IntegerType x) { - return ImplSaturatingRoundingMultiplyByPOT::eval(x); -} - -// Part 2: the FixedPoint class. - -// A FixedPoint object represents a fixed-point value stored in the underlying -// integer type tRawType, if tRawType is a plain scalar integer type. -// Alternatively, tRawType may be a SIMD type (e.g. NEON int32x4_t) in which -// case a FixedPoint object represents a corresponding SIMD vector of fixed -// point values. -// -// tIntegerBits describes the range of the fixed-point format: if -// tIntegerBits == m then the range of representable values is the half-open -// interval [-2^m; 2^m) where the open boundary on the right side means that -// 2^m is not representable (how close the maximum representable value is to -// it, depends on bit-depth of tRawType). -// -// In "Q format notation", -// https://en.wikipedia.org/wiki/Q_(number_format) -// we are describing the format -// Qm.n -// where -// m = tIntegerBits -// and -// n = NumberOfBits(tRawType) - (m + 1) -// Note that the (m + 1) in the above line is because we adopt the convention -// that we count the integer bits exclusively of the sign bit; so (m + 1) is -// the total number of integer bits inclusive of the sign bit. -// -// Accordingly, the number of integral representable values in our range -// [-2^m ; 2^m) -// is equal to 2^(m+1). 
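The class comment above defines the Qm.n mapping real_value = raw_value / 2^n with n = 31 - m for a 32-bit raw type. A small standalone round trip between doubles and raw integers under that convention; the Q32 helper is illustrative, and the clamping that the real FromDouble performs is omitted for brevity.

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>

// Qm.n on a 32-bit signed raw value: m integer bits (excluding the sign bit),
// n = 31 - m fractional bits, real_value = raw_value / 2^n.
template <int kIntegerBits>
struct Q32 {
  static constexpr int kFractionalBits = 31 - kIntegerBits;

  static std::int32_t FromDouble(double x) {
    // No saturation here; the deleted header clamps to the raw min/max.
    return static_cast<std::int32_t>(
        std::round(x * static_cast<double>(1ll << kFractionalBits)));
  }
  static double ToDouble(std::int32_t raw) {
    return raw / static_cast<double>(1ll << kFractionalBits);
  }
};

int main() {
  // Q0.31: values in [-1, 1); 0.5 is exactly 1 << 30.
  assert(Q32<0>::FromDouble(0.5) == (1 << 30));
  // Q5.26: values in [-32, 32); 3.25 is exactly 3.25 * 2^26.
  assert(Q32<5>::FromDouble(3.25) == (13ll << 24));
  assert(Q32<5>::ToDouble(13 << 24) == 3.25);
  return 0;
}
```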
-template -class FixedPoint { - public: - typedef tRawType RawType; - - typedef FixedPointRawTypeTraits RawTypeTraits; - typedef typename RawTypeTraits::ScalarRawType ScalarRawType; - - static constexpr int kTotalBits = 8 * sizeof(ScalarRawType); - static constexpr int kIntegerBits = tIntegerBits; - static constexpr int kFractionalBits = kTotalBits - 1 - kIntegerBits; - static_assert(kIntegerBits >= 0 && kIntegerBits < kTotalBits, - "bad IntegerBits"); - - typedef FixedPoint ScalarFixedPointType; - - static const ScalarRawType ScalarRawMin() { - return std::numeric_limits::min(); - } - - static const ScalarRawType ScalarRawMax() { - return std::numeric_limits::max(); - } - - static const ScalarRawType RawMin() { - return VectorFromScalar(ScalarRawMin()); - } - - static const ScalarRawType RawMax() { - return VectorFromScalar(ScalarRawMax()); - } - - static FixedPoint FromRaw(RawType x) { - FixedPoint retval; - retval.raw() = x; - return retval; - } - - static FixedPoint FromScalarRaw(ScalarRawType x) { - FixedPoint retval; - retval.raw() = Dup(x); - return retval; - } - - static FixedPoint FromScalarFixedPoint(ScalarFixedPointType x) { - return FromScalarRaw(x.raw()); - } - - template - static FixedPoint ConstantPOT() { - static constexpr int kOffset = kFractionalBits + Exponent; - static_assert( - kOffset < 31, - "Constant not exactly representable in this fixed-point format"); - return FromScalarRaw(ScalarRawType(1) << kOffset); - } - - static FixedPoint Zero() { return FromScalarRaw(0); } - - static FixedPoint One() { - return FromScalarRaw( - kIntegerBits == 0 - ? ScalarRawMax() - : (ScalarRawType(1) << (kIntegerBits == 0 ? 0 : kFractionalBits))); - } - - static FixedPoint FromDouble(double x) { - const double min_bound = static_cast(ScalarRawMin()); - const double max_bound = static_cast(ScalarRawMax()); - return FromScalarRaw(static_cast(std::min( - std::max(round(x * static_cast(1ll << kFractionalBits)), - min_bound), - max_bound))); - } - - RawType raw() const { return i_; } - RawType& raw() { return i_; } - - private: - RawType i_; -}; - -// Part 3: implementation of arithmetic operators for the -// FixedPoint class, and a few related functions. - -// A FixedPoint multiplication is just a -// SaturatingRoundingDoublingHighMul operation on the underlying -// raw integer values. The IntegerBits simply add up, as is obvious -// from the fact that the range is [-2^IntegerBits, 2^IntegerBits). -template -FixedPoint operator*( - FixedPoint a, - FixedPoint b) { - FixedPoint c; - c.raw() = SaturatingRoundingDoublingHighMul(a.raw(), b.raw()); - return c; -} - -// Tweaking IntegerBits gives exact multiplication by a power of two. -template -FixedPoint ExactMulByPot( - FixedPoint a) { - FixedPoint c; - c.raw() = a.raw(); - return c; -} - -// If we want to leave IntegerBits fixed, then multiplication -// by a power of two has to be saturating/rounding, not exact anymore. -template -FixedPoint SaturatingRoundingMultiplyByPOT( - FixedPoint a) { - return FixedPoint::FromRaw( - SaturatingRoundingMultiplyByPOT(a.raw())); -} - -// Generic arithmetic operators. 
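ExactMulByPot above multiplies by a power of two "for free" by changing only the compile-time IntegerBits (the raw bits are untouched), whereas keeping the format fixed forces a saturating/rounding shift of the raw value. A scalar illustration of that distinction, with illustrative bookkeeping rather than the FixedPoint class itself:

```cpp
#include <cassert>
#include <cstdint>

// Two ways to multiply a fixed-point value by 2:
//  (1) "Exact": keep the raw bits, reinterpret them in a format with one more
//      integer bit (the scale changes, the stored integer does not).
//  (2) "In-format": keep the format and shift the raw bits left, which needs
//      saturation if the result leaves the representable range.
int main() {
  // The value 0.75 stored as Q1.30 (1 integer bit, 30 fractional bits).
  const std::int32_t raw_q1 = static_cast<std::int32_t>(0.75 * (1 << 30));

  // (1) Exact multiply by 2: same raw value, now read as Q2.29.
  const double as_q2 = raw_q1 / static_cast<double>(1 << 29);
  assert(as_q2 == 1.5);

  // (2) In-format multiply by 2: shift the raw value, still read as Q1.30.
  const std::int32_t shifted = raw_q1 << 1;  // no overflow for this input
  const double still_q1 = shifted / static_cast<double>(1 << 30);
  assert(still_q1 == 1.5);
  // For a larger input (e.g. 1.5 in Q1.30) the shift would overflow, which is
  // why the in-format path in the header saturates instead of shifting blindly.
  return 0;
}
```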
- -#define MAKE_FIXEDPOINT_UNARY_FUNC(FuncName, ImplFuncName) \ - template \ - FixedPoint FuncName( \ - FixedPoint a) { \ - return FixedPoint::FromRaw(ImplFuncName(a.raw())); \ - } - -#define MAKE_FIXEDPOINT_BINARY_FUNC(FuncName, ImplFuncName) \ - template \ - FixedPoint FuncName( \ - FixedPoint a, \ - FixedPoint b) { \ - return FixedPoint::FromRaw( \ - ImplFuncName(a.raw(), b.raw())); \ - } - -MAKE_FIXEDPOINT_UNARY_FUNC(operator-, Neg) -MAKE_FIXEDPOINT_UNARY_FUNC(operator~, BitNot) -MAKE_FIXEDPOINT_BINARY_FUNC(operator+, Add) -MAKE_FIXEDPOINT_BINARY_FUNC(operator-, Sub) -MAKE_FIXEDPOINT_BINARY_FUNC(operator&, BitAnd) -MAKE_FIXEDPOINT_BINARY_FUNC(operator^, BitXor) -MAKE_FIXEDPOINT_BINARY_FUNC(operator|, BitOr) -MAKE_FIXEDPOINT_BINARY_FUNC(RoundingHalfSum, RoundingHalfSum) - -#undef MAKE_FIXEDPOINT_UNARY_FUNC -#undef MAKE_FIXEDPOINT_BINARY_FUNC - -#define MAKE_FIXEDPOINT_UNARY_FUNC_RETURNING_RAW(FuncName) \ - template \ - tRawType FuncName(FixedPoint a) { \ - return FuncName(a.raw()); \ - } - -#define MAKE_FIXEDPOINT_BINARY_FUNC_RETURNING_RAW(FuncName) \ - template \ - tRawType FuncName(FixedPoint a, \ - FixedPoint b) { \ - return FuncName(a.raw(), b.raw()); \ - } - -MAKE_FIXEDPOINT_UNARY_FUNC_RETURNING_RAW(MaskIfZero) -MAKE_FIXEDPOINT_UNARY_FUNC_RETURNING_RAW(MaskIfNonZero) -MAKE_FIXEDPOINT_BINARY_FUNC_RETURNING_RAW(MaskIfEqual) -MAKE_FIXEDPOINT_BINARY_FUNC_RETURNING_RAW(MaskIfNotEqual) -MAKE_FIXEDPOINT_BINARY_FUNC_RETURNING_RAW(MaskIfGreaterThan) -MAKE_FIXEDPOINT_BINARY_FUNC_RETURNING_RAW(MaskIfGreaterThanOrEqual) -MAKE_FIXEDPOINT_BINARY_FUNC_RETURNING_RAW(MaskIfLessThan) -MAKE_FIXEDPOINT_BINARY_FUNC_RETURNING_RAW(MaskIfLessThanOrEqual) - -#undef MAKE_FIXEDPOINT_UNARY_FUNC_RETURNING_RAW -#undef MAKE_FIXEDPOINT_BINARY_FUNC_RETURNING_RAW - -template -FixedPoint SelectUsingMask( - tRawType if_mask, FixedPoint then_val, - FixedPoint else_val) { - return FixedPoint::FromRaw( - SelectUsingMask(if_mask, then_val.raw(), else_val.raw())); -} - -template -bool operator==(FixedPoint a, - FixedPoint b) { - return All(MaskIfEqual(a.raw(), b.raw())); -} - -template -bool operator!=(FixedPoint a, - FixedPoint b) { - return !(a == b); -} - -template -FixedPoint SaturatingAdd( - FixedPoint a, - FixedPoint b) { - return FixedPoint::FromRaw( - SaturatingAdd(a.raw(), b.raw())); -} - -template -FixedPoint AddSaturatingIf16Bit( - FixedPoint a, - FixedPoint b) { - return FixedPoint::FromRaw( - AddSaturatingIf16Bit(a.raw(), b.raw())); -} - -// Conversion to floating-point. -template -double ToDouble(FixedPoint x) { - static_assert(FixedPointRawTypeTraits::kLanes == 1, - "not applicable to SIMD types"); - typedef FixedPoint F; - return x.raw() / static_cast(1ll << F::kFractionalBits); -} - -// Rescale changes the number of IntegerBits and updates the underlying -// raw integer value accordingly. -template -FixedPoint Rescale( - FixedPoint x) { - static constexpr int kExponent = tIntegerBitsSrc - tIntegerBitsDst; - FixedPoint result; - result.raw() = SaturatingRoundingMultiplyByPOT(x.raw()); - return result; -} - -// CheckedFixedPointConstant allows to specify fixed-point constants -// initialized as real numbers, in a way that does not compile floating-point -// arithmetic in production code, yet still checks agreement with the -// floating-point expressions when asserts are enabled. -// -// The raw integer value provided is always a int32, encoding a 32-bit -// fixed-point value, regardless of the actual Scalar type. 
This allows -// writing generic code that applies just as well to the 32-bit and 16-bit -// cases. In the 16-bit case, the raw integer value is internally -// rounding-shifted by 16 bits to the right. -template -inline typename FixedPointType::ScalarRawType RescaleConstantInitializer( - std::int32_t int32_value) { - typedef typename FixedPointType::ScalarRawType ScalarRawType; - static constexpr int ScalarTypeBits = 8 * sizeof(ScalarRawType); - return static_cast( - RoundingDivideByPOT(int32_value, 32 - ScalarTypeBits)); -} -#ifdef GEMMLOWP_ENABLE_FIXEDPOINT_CONSTANTS_CHECKS -template -FixedPointType CheckedFixedPointConstant(std::int32_t raw_value, - double double_value) { - const FixedPointType result = FixedPointType::FromScalarRaw(raw_value); - assert(result == FixedPointType::FromDouble(double_value)); - return result; -} -#define GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(FixedPointType, \ - ScalarRawInt32Value, DoubleValue) \ - (gemmlowp::CheckedFixedPointConstant( \ - gemmlowp::RescaleConstantInitializer( \ - ScalarRawInt32Value), \ - DoubleValue)) - -#else -#define GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(FixedPointType, \ - ScalarRawInt32Value, DoubleValue) \ - (FixedPointType::FromScalarRaw( \ - gemmlowp::RescaleConstantInitializer( \ - ScalarRawInt32Value))) -#endif - -// Implementation of exponential function. - -// Returns exp(x) for x in [-1/4, 0). -template -FixedPoint exp_on_interval_between_negative_one_quarter_and_0_excl( - FixedPoint a) { - typedef FixedPoint F; - const F constant_term = - GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F, 1895147668, std::exp(-1.0 / 8.0)); - const F constant_1_over_3 = - GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F, 715827883, 1.0 / 3.0); - // We're evaluating a Taylor expansion around -1/8, so we do the change of - // variable: x = a + 1/8. - // In fixed-point with 0 integer bits, 1/8 is represented by 1 << 28. - F x = a + F::template ConstantPOT<-3>(); - F x2 = x * x; - F x3 = x2 * x; - F x4 = x2 * x2; - F x4_over_4 = SaturatingRoundingMultiplyByPOT<-2>(x4); - F x4_over_24_plus_x3_over_6_plus_x2_over_2 = - SaturatingRoundingMultiplyByPOT<-1>( - ((x4_over_4 + x3) * constant_1_over_3) + x2); - return AddSaturatingIf16Bit( - constant_term, - constant_term * (x + x4_over_24_plus_x3_over_6_plus_x2_over_2)); -} - -// Returns exp(x) for x < 0. -template -FixedPoint exp_on_negative_values( - FixedPoint a) { - typedef FixedPoint InputF; - typedef FixedPoint ResultF; - static constexpr int kFractionalBits = InputF::kFractionalBits; - static constexpr int kIntegerBits = InputF::kIntegerBits; - const InputF kOneQuarter = InputF::template ConstantPOT<-2>(); - InputF mask = kOneQuarter - InputF::FromScalarRaw(1); - InputF a_mod_quarter_minus_one_quarter = (a & mask) - kOneQuarter; - ResultF result = exp_on_interval_between_negative_one_quarter_and_0_excl( - Rescale<0>(a_mod_quarter_minus_one_quarter)); - tRawType remainder = (a_mod_quarter_minus_one_quarter - a).raw(); - -#define GEMMLOWP_EXP_BARREL_SHIFTER(Exponent, FixedPointMultiplier) \ - if (kIntegerBits > Exponent) { \ - const ResultF kMultiplier = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT( \ - ResultF, FixedPointMultiplier, std::exp(-std::pow(2.0, Exponent))); \ - static constexpr int kShiftAmount = \ - kIntegerBits > Exponent ? 
kFractionalBits + Exponent : 0; \ - result = SelectUsingMask( \ - MaskIfNonZero(BitAnd(remainder, Dup(1 << kShiftAmount))), \ - result * kMultiplier, result); \ - } - - GEMMLOWP_EXP_BARREL_SHIFTER(-2, 1672461947); - GEMMLOWP_EXP_BARREL_SHIFTER(-1, 1302514674); - GEMMLOWP_EXP_BARREL_SHIFTER(+0, 790015084); - GEMMLOWP_EXP_BARREL_SHIFTER(+1, 290630308); - GEMMLOWP_EXP_BARREL_SHIFTER(+2, 39332535); - GEMMLOWP_EXP_BARREL_SHIFTER(+3, 720401); - GEMMLOWP_EXP_BARREL_SHIFTER(+4, 242); - -#undef GEMMLOWP_EXP_BARREL_SHIFTER - - static constexpr int clampB = kIntegerBits > 5 ? 36 - kIntegerBits : 0; - if (kIntegerBits > 5) { - const InputF clamp = - GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(InputF, -(1 << clampB), -32.0); - result = SelectUsingMask(MaskIfLessThan(a, clamp), ResultF::Zero(), result); - } - - result = SelectUsingMask(MaskIfZero(a), ResultF::One(), result); - return result; -} - -// Implementation of tanh: (1 - exp(-2x)) / (1 + exp(-2x)). - -// Returns (1 - x) / (1 + x) for x in (0, 1). -template -FixedPoint one_minus_x_over_one_plus_x_for_x_in_0_1( - FixedPoint a) { - typedef FixedPoint F0; - typedef FixedPoint F2; - F0 half_denominator = RoundingHalfSum(a, F0::One()); - // Newton-Raphson division - // https://en.wikipedia.org/wiki/Division_algorithm#Newton.E2.80.93Raphson_division - // Refer to that page for the logic behind the 48/17 and 32/17 constants. - const F2 constant_48_over_17 = - GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F2, 1515870810, 48.0 / 17.0); - const F2 constant_neg_32_over_17 = - GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F2, -1010580540, -32.0 / 17.0); - F2 x = constant_48_over_17 + half_denominator * constant_neg_32_over_17; - for (int i = 0; i < 3; i++) { - F2 half_denominator_times_x = half_denominator * x; - F2 one_minus_half_denominator_times_x = - F2::One() - half_denominator_times_x; - x = x + Rescale<2>(x * one_minus_half_denominator_times_x); - } - return Rescale<0>(x - F2::One()); -} - -// Returns -tanh(x) for x < 0. -template -FixedPoint neg_tanh_on_negative_values( - FixedPoint a) { - return one_minus_x_over_one_plus_x_for_x_in_0_1( - exp_on_negative_values(ExactMulByPot<1>(a))); -} - -// Returns tanh(x) for any x. -template -FixedPoint tanh(FixedPoint a) { - typedef FixedPoint InputF; - typedef FixedPoint ResultF; - tRawType mask_if_negative = MaskIfLessThan(a, InputF::Zero()); - tRawType mask_if_zero = MaskIfZero(a); - InputF n = SelectUsingMask(mask_if_negative, a, -a); - ResultF t = neg_tanh_on_negative_values(n); - return SelectUsingMask(mask_if_zero, ResultF::Zero(), - SelectUsingMask(mask_if_negative, -t, t)); -} - -// Implementation of logistic function. - -// Returns 1 / (1 + x) for x in (0, 1). -template -FixedPoint one_over_one_plus_x_for_x_in_0_1( - FixedPoint a) { - typedef FixedPoint F0; - typedef FixedPoint F2; - F0 half_denominator = RoundingHalfSum(a, F0::One()); - // Newton-Raphson division - // https://en.wikipedia.org/wiki/Division_algorithm#Newton.E2.80.93Raphson_division - // Refer to that page for the logic behind the 48/17 and 32/17 constants. 
- const F2 constant_48_over_17 = - GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F2, 1515870810, 48.0 / 17.0); - const F2 constant_neg_32_over_17 = - GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F2, -1010580540, -32.0 / 17.0); - F2 x = constant_48_over_17 + half_denominator * constant_neg_32_over_17; - for (int i = 0; i < 3; i++) { - F2 half_denominator_times_x = half_denominator * x; - F2 one_minus_half_denominator_times_x = - F2::One() - half_denominator_times_x; - x = x + Rescale<2>(x * one_minus_half_denominator_times_x); - } - return Rescale<0>(ExactMulByPot<-1>(x)); -} - -// Returns logistic(x) = 1 / (1 + exp(-x)) for x > 0. -template -FixedPoint logistic_on_positive_values( - FixedPoint a) { - return one_over_one_plus_x_for_x_in_0_1(exp_on_negative_values(-a)); -} - -// Returns logistic(x) = 1 / (1 + exp(-x)) for any x. -template -FixedPoint logistic(FixedPoint a) { - typedef FixedPoint InputF; - typedef FixedPoint ResultF; - tRawType mask_if_positive = MaskIfGreaterThan(a, InputF::Zero()); - tRawType mask_if_zero = MaskIfZero(a); - InputF abs_input = SelectUsingMask(mask_if_positive, a, -a); - ResultF result_if_positive = logistic_on_positive_values(abs_input); - ResultF result_if_negative = ResultF::One() - result_if_positive; - const ResultF one_half = - GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(ResultF, 1 << 30, 0.5); - return SelectUsingMask(mask_if_zero, one_half, - SelectUsingMask(mask_if_positive, result_if_positive, - result_if_negative)); -} - -} // end namespace gemmlowp - -#ifdef GEMMLOWP_NEON -#include "./fixedpoint_neon.h" -#elif defined(GEMMLOWP_AVX2) -#include "./fixedpoint_avx.h" -#elif defined(GEMMLOWP_SSE4) -#include "./fixedpoint_sse.h" -#elif defined(GEMMLOWP_MSA) -#include "./fixedpoint_msa.h" -#endif - -#endif // GEMMLOWP_INTERNAL_FIXEDPOINT_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/fixedpoint/fixedpoint_neon.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/fixedpoint/fixedpoint_neon.h deleted file mode 100644 index 646c5907..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/fixedpoint/fixedpoint_neon.h +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2015 The Gemmlowp Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// fixedpoint_neon.h: optimized NEON specializations of the templates -// in fixedpoint.h. 
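The tanh and logistic helpers above divide by (1 + x) using three Newton-Raphson iterations seeded with the linear estimate 48/17 - 32/17 * d. A floating-point analogue showing why three iterations suffice for d in [0.5, 1); this is an illustrative re-derivation of the scheme, not code from the deleted files.

```cpp
#include <cassert>
#include <cmath>

// Newton-Raphson reciprocal: x_{n+1} = x_n + x_n * (1 - d * x_n) converges to
// 1/d, roughly squaring the relative error each step. Seeding with the linear
// fit 48/17 - 32/17 * d (max error 1/17 on [0.5, 1)) makes three iterations
// enough for roughly 30 bits, which the fixed-point version relies on.
double ApproxReciprocal(double d) {
  double x = 48.0 / 17.0 - 32.0 / 17.0 * d;  // initial guess
  for (int i = 0; i < 3; ++i) {
    x = x + x * (1.0 - d * x);               // one Newton-Raphson step
  }
  return x;
}

int main() {
  for (double d = 0.5; d < 1.0; d += 0.01) {
    assert(std::fabs(ApproxReciprocal(d) - 1.0 / d) < 1e-9);
  }
  return 0;
}
```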
- -#ifndef GEMMLOWP_INTERNAL_FIXEDPOINT_NEON_H_ -#define GEMMLOWP_INTERNAL_FIXEDPOINT_NEON_H_ - -#include - -namespace gemmlowp { - -template <> -struct FixedPointRawTypeTraits { - typedef std::int32_t ScalarRawType; - static constexpr int kLanes = 4; -}; - -template <> -struct FixedPointRawTypeTraits { - typedef std::int16_t ScalarRawType; - static constexpr int kLanes = 8; -}; - -template <> -inline int32x4_t BitAnd(int32x4_t a, int32x4_t b) { - return vandq_s32(a, b); -} - -template <> -inline int16x8_t BitAnd(int16x8_t a, int16x8_t b) { - return vandq_s16(a, b); -} - -template <> -inline int32x4_t BitOr(int32x4_t a, int32x4_t b) { - return vorrq_s32(a, b); -} - -template <> -inline int16x8_t BitOr(int16x8_t a, int16x8_t b) { - return vorrq_s16(a, b); -} - -template <> -inline int32x4_t BitXor(int32x4_t a, int32x4_t b) { - return veorq_s32(a, b); -} - -template <> -inline int16x8_t BitXor(int16x8_t a, int16x8_t b) { - return veorq_s16(a, b); -} - -template <> -inline int32x4_t BitNot(int32x4_t a) { - return veorq_s32(a, vdupq_n_s32(-1)); -} - -template <> -inline int16x8_t BitNot(int16x8_t a) { - return veorq_s16(a, vdupq_n_s16(-1)); -} - -template <> -inline int32x4_t Add(int32x4_t a, int32x4_t b) { - return vaddq_s32(a, b); -} - -template <> -inline int16x8_t Add(int16x8_t a, int16x8_t b) { - return vaddq_s16(a, b); -} - -template <> -inline int32x4_t Sub(int32x4_t a, int32x4_t b) { - return vsubq_s32(a, b); -} - -template <> -inline int16x8_t Sub(int16x8_t a, int16x8_t b) { - return vsubq_s16(a, b); -} - -template <> -inline int32x4_t Neg(int32x4_t a) { - return vnegq_s32(a); -} - -template <> -inline int16x8_t Neg(int16x8_t a) { - return vnegq_s16(a); -} - -template <> -inline int32x4_t ShiftLeft(int32x4_t a, int offset) { - return vshlq_s32(a, vdupq_n_s32(offset)); -} - -template <> -inline int16x8_t ShiftLeft(int16x8_t a, int offset) { - return vshlq_s16(a, vdupq_n_s16(offset)); -} - -template <> -inline int32x4_t ShiftRight(int32x4_t a, int offset) { - return vshlq_s32(a, vdupq_n_s32(-offset)); -} - -template <> -inline int16x8_t ShiftRight(int16x8_t a, int offset) { - return vshlq_s16(a, vdupq_n_s16(-offset)); -} - -template <> -inline int32x4_t SelectUsingMask(int32x4_t if_mask, int32x4_t then_val, - int32x4_t else_val) { - return vbslq_s32(vreinterpretq_u32_s32(if_mask), then_val, else_val); -} - -template <> -inline int16x8_t SelectUsingMask(int16x8_t if_mask, int16x8_t then_val, - int16x8_t else_val) { - return vbslq_s16(vreinterpretq_u16_s16(if_mask), then_val, else_val); -} - -template <> -inline int32x4_t MaskIfEqual(int32x4_t a, int32x4_t b) { - return vreinterpretq_s32_u32(vceqq_s32(a, b)); -} - -template <> -inline int16x8_t MaskIfEqual(int16x8_t a, int16x8_t b) { - return vreinterpretq_s16_u16(vceqq_s16(a, b)); -} - -template <> -inline int32x4_t MaskIfNotEqual(int32x4_t a, int32x4_t b) { - return BitNot(MaskIfEqual(a, b)); -} - -template <> -inline int16x8_t MaskIfNotEqual(int16x8_t a, int16x8_t b) { - return BitNot(MaskIfEqual(a, b)); -} - -template <> -inline int32x4_t MaskIfZero(int32x4_t a) { - return MaskIfEqual(a, vdupq_n_s32(0)); -} - -template <> -inline int16x8_t MaskIfZero(int16x8_t a) { - return MaskIfEqual(a, vdupq_n_s16(0)); -} - -template <> -inline int32x4_t MaskIfNonZero(int32x4_t a) { - return vreinterpretq_s32_u32(vtstq_s32(a, a)); -} - -template <> -inline int16x8_t MaskIfNonZero(int16x8_t a) { - return vreinterpretq_s16_u16(vtstq_s16(a, a)); -} - -template <> -inline int32x4_t MaskIfGreaterThan(int32x4_t a, int32x4_t b) { - return 
vreinterpretq_s32_u32(vcgtq_s32(a, b)); -} - -template <> -inline int16x8_t MaskIfGreaterThan(int16x8_t a, int16x8_t b) { - return vreinterpretq_s16_u16(vcgtq_s16(a, b)); -} - -template <> -inline int32x4_t MaskIfGreaterThanOrEqual(int32x4_t a, int32x4_t b) { - return vreinterpretq_s32_u32(vcgeq_s32(a, b)); -} - -template <> -inline int16x8_t MaskIfGreaterThanOrEqual(int16x8_t a, int16x8_t b) { - return vreinterpretq_s16_u16(vcgeq_s16(a, b)); -} - -template <> -inline int32x4_t MaskIfLessThan(int32x4_t a, int32x4_t b) { - return vreinterpretq_s32_u32(vcltq_s32(a, b)); -} - -template <> -inline int16x8_t MaskIfLessThan(int16x8_t a, int16x8_t b) { - return vreinterpretq_s16_u16(vcltq_s16(a, b)); -} - -template <> -inline int32x4_t MaskIfLessThanOrEqual(int32x4_t a, int32x4_t b) { - return vreinterpretq_s32_u32(vcleq_s32(a, b)); -} - -template <> -inline int16x8_t MaskIfLessThanOrEqual(int16x8_t a, int16x8_t b) { - return vreinterpretq_s16_u16(vcleq_s16(a, b)); -} - -template <> -inline bool All(int32x4_t a) { - a = vandq_s32(a, vextq_s32(a, a, 1)); - a = vandq_s32(a, vextq_s32(a, a, 2)); - return vgetq_lane_s32(a, 0); -} - -template <> -inline bool All(int16x8_t a) { - a = vandq_s16(a, vextq_s16(a, a, 1)); - a = vandq_s16(a, vextq_s16(a, a, 2)); - a = vandq_s16(a, vextq_s16(a, a, 4)); - return vgetq_lane_s16(a, 0); -} - -template <> -inline bool Any(int32x4_t a) { - a = vorrq_s32(a, vextq_s32(a, a, 1)); - a = vorrq_s32(a, vextq_s32(a, a, 2)); - return vgetq_lane_s32(a, 0); -} - -template <> -inline bool Any(int16x8_t a) { - a = vorrq_s16(a, vextq_s16(a, a, 1)); - a = vorrq_s16(a, vextq_s16(a, a, 2)); - a = vorrq_s16(a, vextq_s16(a, a, 4)); - return vgetq_lane_s16(a, 0); -} - -template <> -inline int32x4_t RoundingHalfSum(int32x4_t a, int32x4_t b) { - return vrhaddq_s32(a, b); -} - -template <> -inline int16x8_t RoundingHalfSum(int16x8_t a, int16x8_t b) { - return vrhaddq_s16(a, b); -} - -template <> -inline int32x4_t SaturatingRoundingDoublingHighMul(int32x4_t a, int32x4_t b) { - return vqrdmulhq_s32(a, b); -} - -template <> -inline int16x8_t SaturatingRoundingDoublingHighMul(int16x8_t a, int16x8_t b) { - return vqrdmulhq_s16(a, b); -} - -template <> -inline int32x4_t RoundingDivideByPOT(int32x4_t x, int exponent) { - const int32x4_t shift_vec = vdupq_n_s32(-exponent); - const int32x4_t fixup = vshrq_n_s32(vandq_s32(x, shift_vec), 31); - const int32x4_t fixed_up_x = vqaddq_s32(x, fixup); - return vrshlq_s32(fixed_up_x, shift_vec); -} - -template <> -inline int16x8_t RoundingDivideByPOT(int16x8_t x, int exponent) { - const int16x8_t shift_vec = vdupq_n_s16(-exponent); - const int16x8_t fixup = vshrq_n_s16(vandq_s16(x, shift_vec), 15); - const int16x8_t fixed_up_x = vqaddq_s16(x, fixup); - return vrshlq_s16(fixed_up_x, shift_vec); -} - -template -struct ImplSaturatingRoundingMultiplyByPOT { - static int32x4_t eval(int32x4_t x) { return vqshlq_n_s32(x, Exponent); } -}; - -template -struct ImplSaturatingRoundingMultiplyByPOT { - static int32x4_t eval(int32x4_t x) { - const int32x4_t fixup = vshrq_n_s32(x, 31); - const int32x4_t fixed_up_x = vqaddq_s32(x, fixup); - return vrshrq_n_s32(fixed_up_x, -Exponent); - } -}; - -template -struct ImplSaturatingRoundingMultiplyByPOT { - static int16x8_t eval(int16x8_t x) { return vqshlq_n_s16(x, Exponent); } -}; - -template -struct ImplSaturatingRoundingMultiplyByPOT { - static int16x8_t eval(int16x8_t x) { - const int16x8_t fixup = vshrq_n_s16(x, 15); - const int16x8_t fixed_up_x = vqaddq_s16(x, fixup); - return vrshrq_n_s16(fixed_up_x, -Exponent); - } -}; 
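The NEON RoundingDivideByPOT above applies a sign fixup before the rounding shift so that ties round away from zero, matching the generic version earlier in the file. A scalar reference with the same semantics (mirroring that generic code) and spot checks of the tie cases:

```cpp
#include <cassert>
#include <cstdint>

// Scalar reference for "rounding arithmetic right shift":
// result = round(x / 2^exponent), ties rounded away from zero.
// Relies on arithmetic right shift for negative x, as the original does.
std::int32_t RoundingDivideByPOT(std::int32_t x, int exponent) {
  assert(exponent >= 0 && exponent <= 31);
  const std::int32_t mask = static_cast<std::int32_t>((1ll << exponent) - 1);
  const std::int32_t remainder = x & mask;
  const std::int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
  return (x >> exponent) + (remainder > threshold ? 1 : 0);
}

int main() {
  assert(RoundingDivideByPOT(5, 2) == 1);    //  1.25 ->  1
  assert(RoundingDivideByPOT(6, 2) == 2);    //  1.5  ->  2 (tie, away from zero)
  assert(RoundingDivideByPOT(-6, 2) == -2);  // -1.5  -> -2 (tie, away from zero)
  assert(RoundingDivideByPOT(-5, 2) == -1);  // -1.25 -> -1
  return 0;
}
```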
- -template <> -inline int32x4_t Dup(std::int32_t x) { - return vdupq_n_s32(x); -} - -template <> -inline int16x8_t Dup(std::int16_t x) { - return vdupq_n_s16(x); -} - -// So far this is only needed for int16. -template <> -inline int16x8_t SaturatingAdd(int16x8_t a, int16x8_t b) { - return vqaddq_s16(a, b); -} - -} // end namespace gemmlowp - -#endif // GEMMLOWP_INTERNAL_FIXEDPOINT_NEON_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/fixedpoint/fixedpoint_sse.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/fixedpoint/fixedpoint_sse.h deleted file mode 100644 index a1fae32d..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/fixedpoint/fixedpoint_sse.h +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// fixedpoint_SSE.h: optimized SSE specializations of the templates -// in fixedpoint.h. - -#ifndef GEMMLOWP_INTERNAL_FIXEDPOINT_SSE_H_ -#define GEMMLOWP_INTERNAL_FIXEDPOINT_SSE_H_ - -#include -#include "fixedpoint.h" - -namespace gemmlowp { - -// SSE intrinsics are not finely typed: there is a single __m128i vector -// type that does not distinguish between "int32x4" and "int16x8" use -// cases, unlike the NEON equivalents. Because we had initially focused -// on int32x4, we did not pay attention and specialized these fixedpoint -// templates directly for __m128i hardcoding the int32x4 semantics, -// not leaving room for int16x8 semantics. Amending that by adding a separate -// data type, int16x8_m128i, that wraps __m128i while being a separate -// type. 
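The comment above explains why a wrapper struct was introduced: SSE exposes a single untyped __m128i, so int16x8 semantics need their own C++ type to drive overload resolution. The same strong-typedef pattern in portable C++ (no SSE required to see the point); the types and lane counts below are illustrative stand-ins.

```cpp
#include <cassert>
#include <cstdint>

// A "raw" register type shared by two logical element widths (a stand-in for
// __m128i, which SSE uses for both int32x4 and int16x8 data).
struct RawVec { std::uint64_t bits; };

// Layout-wise just RawVec, but a distinct type, so overload resolution can
// pick element-width-specific implementations.
struct Int16Lanes {
  Int16Lanes() {}
  explicit Int16Lanes(RawVec w) : v(w) {}
  RawVec v;
};

// Two overloads of the "same" operation, selected purely by the wrapper type.
int LaneCount(RawVec)     { return 2; }  // treat the raw type as 2 x int32
int LaneCount(Int16Lanes) { return 4; }  // treat the wrapped type as 4 x int16

int main() {
  RawVec r{0};
  assert(LaneCount(r) == 2);
  assert(LaneCount(Int16Lanes(r)) == 4);
  return 0;
}
```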
-struct int16x8_m128i { - int16x8_m128i() {} - explicit int16x8_m128i(__m128i w) : v(w) {} - ~int16x8_m128i() {} - - __m128i v; -}; - -template <> -struct FixedPointRawTypeTraits<__m128i> { - typedef std::int32_t ScalarRawType; - static constexpr int kLanes = 4; -}; - -template <> -struct FixedPointRawTypeTraits { - typedef std::int16_t ScalarRawType; - static constexpr int kLanes = 8; -}; - -template <> -inline __m128i BitAnd(__m128i a, __m128i b) { - return _mm_and_si128(a, b); -} - -template <> -inline int16x8_m128i BitAnd(int16x8_m128i a, int16x8_m128i b) { - return int16x8_m128i(_mm_and_si128(a.v, b.v)); -} - -template <> -inline __m128i BitOr(__m128i a, __m128i b) { - return _mm_or_si128(a, b); -} - -template <> -inline int16x8_m128i BitOr(int16x8_m128i a, int16x8_m128i b) { - return int16x8_m128i(_mm_or_si128(a.v, b.v)); -} - -template <> -inline __m128i BitXor(__m128i a, __m128i b) { - return _mm_xor_si128(a, b); -} - -template <> -inline int16x8_m128i BitXor(int16x8_m128i a, int16x8_m128i b) { - return int16x8_m128i(_mm_xor_si128(a.v, b.v)); -} - -template <> -inline __m128i BitNot(__m128i a) { - return _mm_andnot_si128(a, _mm_set1_epi32(-1)); -} - -template <> -inline int16x8_m128i BitNot(int16x8_m128i a) { - return int16x8_m128i(_mm_andnot_si128(a.v, _mm_set1_epi16(-1))); -} - -template <> -inline __m128i Add(__m128i a, __m128i b) { - return _mm_add_epi32(a, b); -} - -template <> -inline int16x8_m128i Add(int16x8_m128i a, int16x8_m128i b) { - return int16x8_m128i(_mm_add_epi16(a.v, b.v)); -} - -template <> -inline __m128i Mul(__m128i a, __m128i b) { - return _mm_mullo_epi32(a, b); -} - -template <> -inline int16x8_m128i Mul(int16x8_m128i a, int16x8_m128i b) { - return int16x8_m128i(_mm_mullo_epi16(a.v, b.v)); -} - -template <> -inline __m128i Sub(__m128i a, __m128i b) { - return _mm_sub_epi32(a, b); -} - -template <> -inline int16x8_m128i Sub(int16x8_m128i a, int16x8_m128i b) { - return int16x8_m128i(_mm_sub_epi16(a.v, b.v)); -} - -template <> -inline __m128i Neg(__m128i a) { - return _mm_sign_epi32(a, _mm_set1_epi32(-1)); -} - -template <> -inline int16x8_m128i Neg(int16x8_m128i a) { - return int16x8_m128i(_mm_sign_epi16(a.v, _mm_set1_epi16(-1))); -} - -template <> -inline __m128i ShiftLeft(__m128i a, int offset) { - return _mm_slli_epi32(a, offset); -} - -template <> -inline int16x8_m128i ShiftLeft(int16x8_m128i a, int offset) { - return int16x8_m128i(_mm_slli_epi16(a.v, offset)); -} - -template <> -inline __m128i ShiftRight(__m128i a, int offset) { - return _mm_srai_epi32(a, offset); -} - -template <> -inline int16x8_m128i ShiftRight(int16x8_m128i a, int offset) { - return int16x8_m128i(_mm_srai_epi16(a.v, offset)); -} - -template <> -inline __m128i SelectUsingMask(__m128i if_mask, __m128i then_val, - __m128i else_val) { - // borrowed from Intel's arm_neon_sse.h header. - return _mm_or_si128(_mm_and_si128(if_mask, then_val), - _mm_andnot_si128(if_mask, else_val)); -} - -template <> -inline int16x8_m128i SelectUsingMask(int16x8_m128i if_mask, - int16x8_m128i then_val, - int16x8_m128i else_val) { - // borrowed from Intel's arm_neon_sse.h header. 
- return int16x8_m128i(SelectUsingMask(if_mask.v, then_val.v, else_val.v)); -} - -template <> -inline __m128i MaskIfEqual(__m128i a, __m128i b) { - return _mm_cmpeq_epi32(a, b); -} - -template <> -inline int16x8_m128i MaskIfEqual(int16x8_m128i a, int16x8_m128i b) { - return int16x8_m128i(_mm_cmpeq_epi16(a.v, b.v)); -} - -template <> -inline __m128i MaskIfNotEqual(__m128i a, __m128i b) { - return BitNot(MaskIfEqual(a, b)); -} - -template <> -inline int16x8_m128i MaskIfNotEqual(int16x8_m128i a, int16x8_m128i b) { - return BitNot(MaskIfEqual(a, b)); -} - -template <> -inline __m128i MaskIfZero(__m128i a) { - return MaskIfEqual(a, _mm_set1_epi32(0)); -} - -template <> -inline int16x8_m128i MaskIfZero(int16x8_m128i a) { - return MaskIfEqual(a, int16x8_m128i(_mm_set1_epi16(0))); -} - -template <> -inline __m128i MaskIfNonZero(__m128i a) { - return MaskIfNotEqual(a, _mm_set1_epi32(0)); -} - -template <> -inline int16x8_m128i MaskIfNonZero(int16x8_m128i a) { - return MaskIfNotEqual(a, int16x8_m128i(_mm_set1_epi16(0))); -} - -template <> -inline __m128i MaskIfGreaterThan(__m128i a, __m128i b) { - return _mm_cmpgt_epi32(a, b); -} - -template <> -inline int16x8_m128i MaskIfGreaterThan(int16x8_m128i a, int16x8_m128i b) { - return int16x8_m128i(_mm_cmpgt_epi16(a.v, b.v)); -} - -template <> -inline __m128i MaskIfLessThan(__m128i a, __m128i b) { - return _mm_cmplt_epi32(a, b); -} - -template <> -inline int16x8_m128i MaskIfLessThan(int16x8_m128i a, int16x8_m128i b) { - return int16x8_m128i(_mm_cmplt_epi16(a.v, b.v)); -} - -template <> -inline __m128i MaskIfGreaterThanOrEqual(__m128i a, __m128i b) { - return BitNot(MaskIfLessThan(a, b)); -} - -template <> -inline int16x8_m128i MaskIfGreaterThanOrEqual(int16x8_m128i a, - int16x8_m128i b) { - return BitNot(MaskIfLessThan(a, b)); -} - -template <> -inline __m128i MaskIfLessThanOrEqual(__m128i a, __m128i b) { - return BitNot(MaskIfGreaterThan(a, b)); -} - -template <> -inline int16x8_m128i MaskIfLessThanOrEqual(int16x8_m128i a, int16x8_m128i b) { - return BitNot(MaskIfGreaterThan(a, b)); -} - -/* Assumptions: - - All and Any are used on masks. - - masks are all_ones for true lanes, all_zeroes otherwise. -Hence, All means all 128bits set, and Any means any bit set. 
-*/ - -template <> -inline bool All(__m128i a) { - return _mm_testc_si128(a, a); -} - -template <> -inline bool All(int16x8_m128i a) { - return _mm_testc_si128(a.v, a.v); -} - -template <> -inline bool Any(__m128i a) { - return !_mm_testz_si128(a, a); -} - -template <> -inline bool Any(int16x8_m128i a) { - return !_mm_testz_si128(a.v, a.v); -} - -template <> -inline __m128i RoundingHalfSum(__m128i a, __m128i b) { - /* __m128i round_bit_mask, a_over_2, b_over_2, round_bit, sum; */ - /* We divide the inputs before the add to avoid the overflow and costly test - */ - /* of checking if an overflow occured on signed add */ - /* round_bit_mask = _mm_set1_epi32(1); */ - /* a_over_2 = _mm_srai_epi32(a, 1); */ - /* b_over_2 = _mm_srai_epi32(b, 1); */ - /* sum = Add(a_over_2, b_over_2); */ - /* round_bit = _mm_sign_epi32(BitAnd(BitOr(a,b), round_bit_mask), sum); */ - /* return Add(sum, round_bit); */ - - /* Other possibility detecting overflow and xor the sign if an overflow - * happened*/ - __m128i one, sign_bit_mask, sum, rounded_half_sum, overflow, result; - one = _mm_set1_epi32(1); - sign_bit_mask = _mm_set1_epi32(0x80000000); - sum = Add(a, b); - rounded_half_sum = _mm_srai_epi32(Add(sum, one), 1); - overflow = - BitAnd(BitAnd(BitXor(a, rounded_half_sum), BitXor(b, rounded_half_sum)), - sign_bit_mask); - result = BitXor(rounded_half_sum, overflow); - return result; -} - -template <> -inline int16x8_m128i RoundingHalfSum(int16x8_m128i a, int16x8_m128i b) { - // Idea: go to unsigned to use _mm_avg_epu16, - // borrowed from Intel's arm_neon_sse.h header. - __m128i constant_neg_32768 = _mm_set1_epi16(-32768); - __m128i a_unsigned = _mm_sub_epi16(a.v, constant_neg_32768); - __m128i b_unsigned = _mm_sub_epi16(b.v, constant_neg_32768); - __m128i avg_unsigned = _mm_avg_epu16(a_unsigned, b_unsigned); - __m128i avg = _mm_add_epi16(avg_unsigned, constant_neg_32768); - return int16x8_m128i(avg); -} - -template <> -inline __m128i SaturatingRoundingDoublingHighMul(__m128i a, __m128i b) { - __m128i min, saturation_mask, a0_a2, a1_a3, b0_b2, b1_b3; - __m128i a0b0_a2b2, a1b1_a3b3, a0b0_a2b2_rounded, a1b1_a3b3_rounded; - __m128i a0b0_a2b2_rounded_2x, a1b1_a3b3_rounded_2x, result; - __m128i nudge; - - // saturation only happen if a == b == INT_MIN - min = _mm_set1_epi32(std::numeric_limits::min()); - saturation_mask = BitAnd(MaskIfEqual(a, b), MaskIfEqual(a, min)); - - // a = a0 | a1 | a2 | a3 - // b = b0 | b1 | b2 | b3 - a0_a2 = a; - a1_a3 = _mm_srli_si128(a, 4); - b0_b2 = b; - b1_b3 = _mm_srli_si128(b, 4); - - a0b0_a2b2 = _mm_mul_epi32(a0_a2, b0_b2); - a1b1_a3b3 = _mm_mul_epi32(a1_a3, b1_b3); - - // do the rounding and take into account that it will be doubled - nudge = _mm_set1_epi64x(1 << 30); - a0b0_a2b2_rounded = _mm_add_epi64(a0b0_a2b2, nudge); - a1b1_a3b3_rounded = _mm_add_epi64(a1b1_a3b3, nudge); - - // do the doubling - a0b0_a2b2_rounded_2x = _mm_slli_epi64(a0b0_a2b2_rounded, 1); - a1b1_a3b3_rounded_2x = _mm_slli_epi64(a1b1_a3b3_rounded, 1); - - // get the high part of the products - result = _mm_blend_epi16(_mm_srli_si128(a0b0_a2b2_rounded_2x, 4), - a1b1_a3b3_rounded_2x, 0xcc); - - // saturate those which overflowed - return SelectUsingMask(saturation_mask, min, result); -} - -template <> -inline int16x8_m128i SaturatingRoundingDoublingHighMul(int16x8_m128i a, - int16x8_m128i b) { - // Idea: use _mm_mulhrs_epi16 then saturate with a bit-operation, - // borrowed from Intel's arm_neon_sse.h header. 
- __m128i result_unsaturated = _mm_mulhrs_epi16(a.v, b.v); - __m128i saturation_mask = - _mm_cmpeq_epi16(result_unsaturated, _mm_set1_epi16(0x8000)); - __m128i result = _mm_xor_si128(result_unsaturated, saturation_mask); - return int16x8_m128i(result); -} - -template <> -inline __m128i Dup<__m128i>(std::int32_t x) { - return _mm_set1_epi32(x); -} - -template <> -inline int16x8_m128i Dup(std::int16_t x) { - return int16x8_m128i(_mm_set1_epi16(x)); -} - -// So far this is only needed for int16. -template <> -inline int16x8_m128i SaturatingAdd(int16x8_m128i a, int16x8_m128i b) { - return int16x8_m128i(_mm_adds_epi16(a.v, b.v)); -} - -} // end namespace gemmlowp - -#endif // GEMMLOWP_INTERNAL_FIXEDPOINT_SSE_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/internal/detect_platform.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/internal/detect_platform.h deleted file mode 100644 index 6f06d19f..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/gemmlowp/internal/detect_platform.h +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2018 The Gemmlowp Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// detect_platform.h: Sets up macros that control architecture-specific -// features of gemmlowp's implementation. - -#ifndef GEMMLOWP_INTERNAL_DETECT_PLATFORM_H_ -#define GEMMLOWP_INTERNAL_DETECT_PLATFORM_H_ - -// Our inline assembly path assume GCC/Clang syntax. -// Native Client doesn't seem to support inline assembly(?). -#if defined(__GNUC__) && !defined(__native_client__) -#define GEMMLOWP_ALLOW_INLINE_ASM -#endif - -// Define macro statement that avoids inlining for GCC. -// For non-GCC, define as empty macro. -#if defined(__GNUC__) -#define GEMMLOWP_NOINLINE __attribute__((noinline)) -#else -#define GEMMLOWP_NOINLINE -#endif - -// Detect ARM, 32-bit or 64-bit -#ifdef __arm__ -#define GEMMLOWP_ARM_32 -#endif - -#ifdef __aarch64__ -#define GEMMLOWP_ARM_64 -#endif - -#if defined(GEMMLOWP_ARM_32) || defined(GEMMLOWP_ARM_64) -#define GEMMLOWP_ARM -#endif - -// Detect MIPS, 32-bit or 64-bit -#if defined(__mips) && !defined(__LP64__) -#define GEMMLOWP_MIPS_32 -#endif - -#if defined(__mips) && defined(__LP64__) -#define GEMMLOWP_MIPS_64 -#endif - -#if defined(GEMMLOWP_MIPS_32) || defined(GEMMLOWP_MIPS_64) -#define GEMMLOWP_MIPS -#endif - -// Detect x86, 32-bit or 64-bit -#if defined(__i386__) || defined(_M_IX86) || defined(_X86_) || defined(__i386) -#define GEMMLOWP_X86_32 -#endif - -#if defined(__x86_64__) || defined(_M_X64) || defined(__amd64) -#define GEMMLOWP_X86_64 -#endif - -#if defined(GEMMLOWP_X86_32) || defined(GEMMLOWP_X86_64) -#define GEMMLOWP_X86 -#endif - -// Some of our optimized paths use inline assembly and for -// now we don't bother enabling some other optimized paths using intrinddics -// where we can't use inline assembly paths. -#ifdef GEMMLOWP_ALLOW_INLINE_ASM - -// Detect NEON. It's important to check for both tokens. 
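The detection header above only defines feature macros; consuming code then chooses an implementation at preprocessing time, exactly as fixedpoint.h does with its trailing #ifdef chain of architecture-specific includes. A compressed sketch of that pattern with a portable fallback; DEMO_HAVE_NEON is an illustrative name, not one of the real tokens.

```cpp
#include <cstdio>

// Detection phase: only #define feature tokens (both NEON spellings checked,
// as the deleted header recommends).
#if (defined __ARM_NEON) || (defined __ARM_NEON__)
#define DEMO_HAVE_NEON
#endif

// Consumption phase: each user site selects a code path from the tokens.
const char* SelectedPath() {
#if defined(DEMO_HAVE_NEON)
  return "NEON path";                // would pull in the NEON specializations
#elif defined(__SSE4_1__)
  return "SSE4 path";                // would pull in the SSE specializations
#else
  return "portable scalar path";     // generic templates only
#endif
}

int main() {
  std::printf("dispatch: %s\n", SelectedPath());
  return 0;
}
```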
-#if (defined __ARM_NEON) || (defined __ARM_NEON__) -#define GEMMLOWP_NEON -#endif - -// Convenience NEON tokens for 32-bit or 64-bit -#if defined(GEMMLOWP_NEON) && defined(GEMMLOWP_ARM_32) -#define GEMMLOWP_NEON_32 -#endif - -#if defined(GEMMLOWP_NEON) && defined(GEMMLOWP_ARM_64) -#define GEMMLOWP_NEON_64 -#endif - -// Detect MIPS MSA. -// Limit MSA optimizations to little-endian CPUs for now. -// TODO: Perhaps, eventually support MSA optimizations on big-endian CPUs? -#if defined(GEMMLOWP_MIPS) && (__mips_isa_rev >= 5) && defined(__mips_msa) && \ - defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) -#define GEMMLOWP_MSA -#endif - -// Convenience MIPS MSA tokens for 32-bit or 64-bit. -#if defined(GEMMLOWP_MSA) && defined(GEMMLOWP_MIPS_32) -#define GEMMLOWP_MSA_32 -#endif - -#if defined(GEMMLOWP_MSA) && defined(GEMMLOWP_MIPS_64) -#define GEMMLOWP_MSA_64 -#endif - -// compiler define for AVX2 -D GEMMLOWP_ENABLE_AVX2 -// Detect AVX2 -#if defined(__AVX2__) && defined(GEMMLOWP_ENABLE_AVX2) -#define GEMMLOWP_AVX2 -// Detect SSE4. -// MSVC does not have __SSE4_1__ macro, but will enable SSE4 -// when AVX is turned on. -#elif defined(__SSE4_1__) || (defined(_MSC_VER) && defined(__AVX__)) -#define GEMMLOWP_SSE4 -// Detect SSE3. -#elif defined(__SSE3__) -#define GEMMLOWP_SSE3 -#endif - -// Convenience SSE4 tokens for 32-bit or 64-bit -#if defined(GEMMLOWP_SSE4) && defined(GEMMLOWP_X86_32) && \ - !defined(GEMMLOWP_DISABLE_SSE4) -#define GEMMLOWP_SSE4_32 -#endif - -#if defined(GEMMLOWP_SSE3) && defined(GEMMLOWP_X86_32) -#define GEMMLOWP_SSE3_32 -#endif - -#if defined(GEMMLOWP_SSE4) && defined(GEMMLOWP_X86_64) && \ - !defined(GEMMLOWP_DISABLE_SSE4) -#define GEMMLOWP_SSE4_64 -#endif - -#if defined(GEMMLOWP_SSE3) && defined(GEMMLOWP_X86_64) -#define GEMMLOWP_SSE3_64 -#endif - -#if defined(GEMMLOWP_AVX2) && defined(GEMMLOWP_X86_64) -#define GEMMLOWP_AVX2_64 -#endif - -#if defined(__has_feature) -#if __has_feature(memory_sanitizer) -#include -#define GEMMLOWP_MARK_MEMORY_AS_INITIALIZED __msan_unpoison -#elif __has_feature(address_sanitizer) -#include -#define GEMMLOWP_MARK_MEMORY_AS_INITIALIZED __asan_unpoison_memory_region -#endif -#endif - -#endif // GEMMLOWP_ALLOW_INLINE_ASM - -// Detect Android. Don't conflate with ARM - we care about tuning -// for non-ARM Android devices too. This can be used in conjunction -// with x86 to tune differently for mobile x86 CPUs (Atom) vs. desktop x86 CPUs. -#if defined(__ANDROID__) || defined(ANDROID) -#define GEMMLOWP_ANDROID -#endif - -#endif // GEMMLOWP_INTERNAL_DETECT_PLATFORM_H_ diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/ruy/ruy/profiler/instrumentation.h b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/ruy/ruy/profiler/instrumentation.h deleted file mode 100644 index c4df1e68..00000000 --- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/source/third_party/ruy/ruy/profiler/instrumentation.h +++ /dev/null @@ -1,203 +0,0 @@ -/* Copyright 2020 Google LLC. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef RUY_RUY_PROFILER_INSTRUMENTATION_H_
-#define RUY_RUY_PROFILER_INSTRUMENTATION_H_
-
-#ifdef RUY_PROFILER
-#include
-#include
-#include
-#endif
-
-namespace ruy {
-namespace profiler {
-
-#ifdef RUY_PROFILER
-
-// A label is how a code scope is annotated to appear in profiles.
-// The stacks that are sampled by the profiler are stacks of such labels.
-// A label consists of a literal string, plus optional integer arguments.
-class Label {
- public:
-  Label() {}
-  template
-  explicit Label(Args... args) {
-    Set(args...);
-  }
-  void Set(const char* format) {
-    format_ = format;
-    args_count_ = 0;
-  }
-  template
-  void Set(const char* format, Args... args) {
-    format_ = format;
-    args_count_ = sizeof...(args);
-    SetArgs(0, args...);
-  }
-
-  void operator=(const Label& other);
-
-  bool operator==(const Label& other) const;
-
-  std::string Formatted() const;
-  const char* format() const { return format_; }
-
- private:
-  void SetArgs(int position, int arg0) { args_[position] = arg0; }
-
-  template
-  void SetArgs(int position, int arg0, Args... args) {
-    SetArgs(position, arg0);
-    SetArgs(position + 1, args...);
-  }
-
-  static constexpr int kMaxArgs = 4;
-  const char* format_ = nullptr;
-  int args_count_ = 0;
-  int args_[kMaxArgs];
-};
-
-namespace detail {
-
-// Forward-declaration, see class ThreadStack below.
-class ThreadStack;
-
-bool& GlobalIsProfilerRunning();
-
-// Returns the global vector of pointers to all stacks, there being one stack
-// per thread executing instrumented code.
-std::vector* GlobalAllThreadStacks();
-
-// Returns the mutex to be locked around any access to GlobalAllThreadStacks().
-std::mutex* GlobalsMutex();
-
-// Returns the thread-local stack, specific to the current thread.
-ThreadStack* ThreadLocalThreadStack();
-
-// This 'stack' is what may be more appropriately called a 'pseudostack':
-// It contains Label entries that are 'manually' entered by instrumentation
-// code. It's unrelated to real call stacks.
-struct Stack {
-  std::uint32_t id = 0;
-  static constexpr int kMaxSize = 64;
-  int size = 0;
-  Label labels[kMaxSize];
-};
-
-// Returns the buffer byte size required by CopyToSample.
-int GetBufferSize(const Stack& stack);
-
-// Copies this Stack into a byte buffer, called a 'sample'.
-void CopyToBuffer(const Stack& stack, char* dst);
-
-// Populates this Stack from an existing sample buffer, typically
-// produced by CopyToSample.
-void ReadFromBuffer(const char* src, Stack* stack);
-
-// ThreadStack is meant to be used as a thread-local singleton, assigning to
-// each thread a Stack object holding its pseudo-stack of profile labels,
-// plus a mutex allowing to synchronize accesses to this pseudo-stack between
-// this thread and a possible profiler thread sampling it.
-class ThreadStack {
- public:
-  ThreadStack();
-  ~ThreadStack();
-
-  const Stack& stack() const { return stack_; }
-
-  // Returns the mutex to lock around any access to this stack. Each stack is
-  // accessed by potentially two threads: the thread that it belongs to
-  // (which calls Push and Pop) and the profiler thread during profiling
-  // (which calls CopyToSample).
-  std::mutex& Mutex() const { return mutex_; }
-
-  // Pushes a new label on the top of this Stack.
-  template
-  void Push(Args... args) {
-    // This mutex locking is needed to guard against race conditions as both
-    // the current thread and the profiler thread may be concurrently accessing
-    // this stack. In addition to that, this mutex locking also serves the other
-    // purpose of acting as a barrier (of compiler code reordering, of runtime
-    // CPU instruction reordering, and of memory access reordering), which
-    // gives a measure of correctness to this profiler. The downside is some
-    // latency. As this lock will be uncontended most of the times, the cost
-    // should be roughly that of an sequentially-consistent atomic access,
-    // comparable to an access to the level of CPU data cache that is shared
-    // among all cores, typically 60 cycles on current ARM CPUs, plus side
-    // effects from barrier instructions.
-    std::lock_guard lock(mutex_);
-    // Avoid overrunning the stack, even in 'release' builds. This profiling
-    // instrumentation code should not ship in release builds anyway, the
-    // overhead of this check is negligible, and overrunning a stack array would
-    // be bad.
-    if (stack_.size >= Stack::kMaxSize) {
-      abort();
-    }
-    stack_.labels[stack_.size++].Set(args...);
-  }
-
-  // Pops the top-most label from this Stack.
-  void Pop() {
-    // See the comment in Push about this lock. While it would be tempting to
-    // try to remove this lock and just atomically decrement size_ with a
-    // store-release, that would not necessarily be a substitute for all of the
-    // purposes that this lock serves, or if it was done carefully to serve all
-    // of the same purposes, then that wouldn't be faster than this (mostly
-    // uncontended) lock.
-    std::lock_guard lock(mutex_);
-    stack_.size--;
-  }
-
- private:
-  mutable std::mutex mutex_;
-  Stack stack_;
-};
-
-}  // namespace detail
-
-// RAII user-facing way to construct Labels associated with their life scope
-// and get them pushed to / popped from the current thread stack.
-class ScopeLabel {
- public:
-  template
-  ScopeLabel(Args... args) : thread_stack_(detail::ThreadLocalThreadStack()) {
-    thread_stack_->Push(args...);
-  }
-
-  ~ScopeLabel() { thread_stack_->Pop(); }
-
- private:
-  detail::ThreadStack* thread_stack_;
-};
-
-#else  // no RUY_PROFILER
-
-class ScopeLabel {
- public:
-  template
-  explicit ScopeLabel(Args...) {}
-
-  // This destructor is needed to consistently silence clang's -Wunused-variable
-  // which seems to trigger semi-randomly.
-  ~ScopeLabel() {}
-};
-
-#endif
-
-}  // namespace profiler
-}  // namespace ruy
-
-#endif  // RUY_RUY_PROFILER_INSTRUMENTATION_H_
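For orientation only (editorial context, not part of the patch): the deleted header is ruy's scope-based profiler instrumentation. A minimal usage sketch, assuming the header is reachable at its usual in-tree path and the build defines RUY_PROFILER; without that define, ScopeLabel compiles to a no-op, as the #else branch above shows. PackLhsBlock and its label text are hypothetical, not ruy APIs.

// Minimal usage sketch of the ruy profiler API declared above.
#include "ruy/profiler/instrumentation.h"

void PackLhsBlock(int block_rows, int block_depth) {
  // RAII: pushes a label (format string plus integer arguments) onto this
  // thread's pseudo-stack for the duration of the scope, so the sampling
  // profiler thread can attribute time to it; the destructor pops it.
  ruy::profiler::ScopeLabel label("Pack LHS block %dx%d", block_rows,
                                  block_depth);
  // ... packing work would go here ...
}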
diff --git a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/tf_fini_fix.c b/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/tf_fini_fix.c
deleted file mode 100644
index ee7b740d..00000000
--- a/Ubiquitous/XiUOS/framework/intelligent/tflite-mcu/tf_fini_fix.c
+++ /dev/null
@@ -1,4 +0,0 @@
-// fix '_fini' not found on gcc risvc 8.2.0
-#if (__riscv == 1) && (__GNUC__ == 8) && (__GNUC_MINOR__ == 2)
-void _fini (void) {}
-#endif
\ No newline at end of file
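For orientation only (editorial context, not part of the patch): tf_fini_fix.c existed solely to satisfy an undefined reference to _fini reported by the RISC-V GCC 8.2.0 toolchain. If that link error resurfaces once the file is gone, a sketch of an equivalent guarded stub, placed in any source file that is always compiled for that target, is shown below; the extern "C" is only needed if the stub lives in a C++ translation unit.

// Guarded stub mirroring the deleted tf_fini_fix.c; compiled in only for the
// toolchain version that reports '_fini' as undefined.
#if (__riscv == 1) && (__GNUC__ == 8) && (__GNUC_MINOR__ == 2)
extern "C" void _fini(void) {}
#endif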