From 99d3d54ac4d17474a81c94ec5bab36f55f470359 Mon Sep 17 00:00:00 2001 From: Archit Gupta <71798289+archigup@users.noreply.github.com> Date: Thu, 15 Dec 2022 21:46:32 +0000 Subject: [PATCH 01/62] Fix array-bounds compiler warning on gcc11+ in list.h (#580) listGET_OWNER_OF_NEXT_ENTRY computes `( pxConstList )->pxIndex->pxNext` after verifying that `( pxConstList )->pxIndex` points to `xListEnd`, which due to being a MiniListItem_t, can be shorter than a ListItem_t. Thus, `( pxConstList )->pxIndex` is a `ListItem_t *` that extends past the end of the `List_t` whose `xListEnd` it points to. This is fixed by accessing `pxNext` through a `MiniListItem_t` instead. --- include/list.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/list.h b/include/list.h index 11241b68687..248212260a1 100644 --- a/include/list.h +++ b/include/list.h @@ -290,7 +290,7 @@ typedef struct xLIST ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ if( ( void * ) ( pxConstList )->pxIndex == ( void * ) &( ( pxConstList )->xListEnd ) ) \ { \ - ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ + ( pxConstList )->pxIndex = ( pxConstList )->xListEnd.pxNext; \ } \ ( pxTCB ) = ( pxConstList )->pxIndex->pvOwner; \ } From 6d65558ba01141d7c50ff6f93a4054cc5f16940e Mon Sep 17 00:00:00 2001 From: tcpluess Date: Mon, 19 Dec 2022 10:48:07 +0100 Subject: [PATCH 02/62] move the prototype for vApplicationIdleHook to task.h. (#600) Co-authored-by: pluess Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --- include/task.h | 18 ++++++++++++++++++ tasks.c | 8 +------- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/include/task.h b/include/task.h index a3548296b97..b01551d5366 100644 --- a/include/task.h +++ b/include/task.h @@ -1649,6 +1649,24 @@ configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVIL #endif +#if ( configUSE_IDLE_HOOK == 1 ) + +/** + * task.h + * @code{c} + * void vApplicationIdleHook( void ); + * @endcode + * + * The application idle hook is called by the idle task. + * This allows the application designer to add background functionality without + * the overhead of a separate task. + * NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, CALL A FUNCTION THAT MIGHT BLOCK. + */ + void vApplicationIdleHook( void ); + +#endif + + #if ( configUSE_TICK_HOOK > 0 ) /** diff --git a/tasks.c b/tasks.c index c9cf459829d..d4d8fb06f2d 100644 --- a/tasks.c +++ b/tasks.c @@ -3477,13 +3477,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters ) #if ( configUSE_IDLE_HOOK == 1 ) { - extern void vApplicationIdleHook( void ); - - /* Call the user defined function from within the idle task. This - * allows the application designer to add background functionality - * without the overhead of a separate task. - * NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, - * CALL A FUNCTION THAT MIGHT BLOCK. */ + /* Call the user defined function from within the idle task. 
*/ vApplicationIdleHook(); } #endif /* configUSE_IDLE_HOOK */ From bb6071e1df3168a64dc2ce79de8aa91b7995ba23 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Fri, 6 Jan 2023 10:42:13 +0800 Subject: [PATCH 03/62] Update equal priority task preemption (#603) * vTaskResume and vTaskPrioritySet don't preempt equal priority task * Update vTaskResumeAll not to preempt task with equal priority * Fix in xTaskResumeFromISR --- tasks.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tasks.c b/tasks.c index d4d8fb06f2d..3a0f0470e78 100644 --- a/tasks.c +++ b/tasks.c @@ -1552,7 +1552,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* The priority of a task other than the currently * running task is being raised. Is the priority being * raised above that of the running task? */ - if( uxNewPriority >= pxCurrentTCB->uxPriority ) + if( uxNewPriority > pxCurrentTCB->uxPriority ) { xYieldRequired = pdTRUE; } @@ -1845,7 +1845,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) prvAddTaskToReadyList( pxTCB ); /* A higher priority task may have just been resumed. */ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { /* This yield may not cause the task just resumed to run, * but will leave the lists in the correct state for the @@ -1913,7 +1913,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { /* Ready lists can be accessed so move the task from the * suspended list to the ready list directly. */ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { xYieldRequired = pdTRUE; @@ -2203,9 +2203,9 @@ BaseType_t xTaskResumeAll( void ) listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); - /* If the moved task has a priority higher than or equal to - * the current task then a yield must be performed. */ - if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority ) + /* If the moved task has a priority higher than the current + * task then a yield must be performed. */ + if( pxTCB->uxPriority > pxCurrentTCB->uxPriority ) { xYieldPending = pdTRUE; } From 8592fd23f4fbc1a2fa821a9b89b61ef1f5652a09 Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Mon, 16 Jan 2023 15:06:18 +0530 Subject: [PATCH 04/62] Update FreeRTOS/FreeRTOS build checks (#613) This is needed to be compatible with the refactoring done in this PR - https://github.com/FreeRTOS/FreeRTOS/pull/889 Signed-off-by: Gaurav Aggarwal Signed-off-by: Gaurav Aggarwal --- .github/workflows/kernel-checks.yml | 25 ---- .github/workflows/kernel-demos.yml | 173 ++++++++++++++++++++++++++++ 2 files changed, 173 insertions(+), 25 deletions(-) create mode 100644 .github/workflows/kernel-demos.yml diff --git a/.github/workflows/kernel-checks.yml b/.github/workflows/kernel-checks.yml index 889a53e6c22..b295033d28a 100644 --- a/.github/workflows/kernel-checks.yml +++ b/.github/workflows/kernel-checks.yml @@ -45,28 +45,3 @@ jobs: cd inspect .github/scripts/kernel_checker.py --json ${HOME}/files_modified.json ${HOME}/files_added.json ${HOME}/files_renamed.json exit $? 
- build-checker: - name: FreeRTOS Posix Build Check - runs-on: ubuntu-latest - steps: - - name: Checkout the parent repository - uses: actions/checkout@v2 - with: - ref: main - repository: FreeRTOS/FreeRTOS - submodules: 'recursive' - fetch-depth: 1 - path: ./workspace - - name: Checkout the current repository - uses: actions/checkout@v2 - with: - path: ./workspace/FreeRTOS/Source - - name: Posix Build Checker - run: | - bash workspace/.github/scripts/posix_build_checker.sh workspace - - name: Install lib pcap dev - run: | - sudo apt-get install libpcap-dev - - name: Posix Network Build Checker - run: | - bash workspace/.github/scripts/posix_network_build_checker.sh workspace diff --git a/.github/workflows/kernel-demos.yml b/.github/workflows/kernel-demos.yml new file mode 100644 index 00000000000..d6eace575ae --- /dev/null +++ b/.github/workflows/kernel-demos.yml @@ -0,0 +1,173 @@ +name: FreeRTOS-Kernel Demos +on: [push, pull_request] + +jobs: + WIN32-MSVC: + name: WIN32 MSVC + runs-on: windows-latest + steps: + - name: Checkout the FreeRTOS/FreeRTOS Repository + uses: actions/checkout@v2 + with: + ref: main + repository: FreeRTOS/FreeRTOS + submodules: 'recursive' + fetch-depth: 1 + + - name: Fetch Kernel Submodule + shell: bash + run: | + git submodule update --checkout --init --depth 1 FreeRTOS/Source + + - name: Add msbuild to PATH + uses: microsoft/setup-msbuild@v1.1 + + - name: Build WIN32-MSVC Demo + working-directory: FreeRTOS/Demo/WIN32-MSVC + run: msbuild WIN32.sln -t:rebuild + + - name: Build WIN32-MSVC-Static-Allocation-Only Demo + working-directory: FreeRTOS/Demo/WIN32-MSVC-Static-Allocation-Only + run: msbuild WIN32.sln -t:rebuild + + WIN32-MingW: + name: WIN32 MingW + runs-on: windows-latest + steps: + - name: Checkout the FreeRTOS/FreeRTOS Repository + uses: actions/checkout@v2 + with: + ref: main + repository: FreeRTOS/FreeRTOS + submodules: 'recursive' + fetch-depth: 1 + + - name: Fetch Kernel Submodule + shell: bash + run: | + git submodule update --checkout --init --depth 1 FreeRTOS/Source + + - name: Build WIN32-MingW Demo + working-directory: FreeRTOS/Demo/WIN32-MingW + run: | + gcc --version + make --version + make + + POSIX-GCC: + name: Native GCC + runs-on: ubuntu-latest + steps: + - name: Checkout the FreeRTOS/FreeRTOS Repository + uses: actions/checkout@v2 + with: + ref: main + repository: FreeRTOS/FreeRTOS + submodules: 'recursive' + fetch-depth: 1 + + - name: Fetch Kernel Submodule + shell: bash + run: git submodule update --checkout --init --depth 1 FreeRTOS/Source + + - name: Install GCC + shell: bash + run: | + sudo apt-get -y update + sudo apt-get -y install build-essential + + - name: Build Posix_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/Posix_GCC + run: make -j + + MSP430-GCC: + name: GNU MSP430 Toolchain + runs-on: ubuntu-latest + steps: + - name: Checkout the FreeRTOS/FreeRTOS Repository + uses: actions/checkout@v2 + with: + ref: main + repository: FreeRTOS/FreeRTOS + submodules: 'recursive' + fetch-depth: 1 + + - name: Fetch Kernel Submodule + shell: bash + run: git submodule update --checkout --init --depth 1 FreeRTOS/Source + + - name: Install MSP430 Toolchain + shell: bash + run: | + sudo apt-get -y update + sudo apt-get -y install gcc-msp430 build-essential + + - name: Build msp430_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/msp430_GCC + run: make -j + + ARM-GCC: + name: GNU ARM Toolchain + runs-on: ubuntu-latest + steps: + - name: Checkout the FreeRTOS/FreeRTOS Repository + uses: actions/checkout@v2 + with: + ref: 
main + repository: FreeRTOS/FreeRTOS + submodules: 'recursive' + fetch-depth: 1 + + - name: Fetch Kernel Submodule + shell: bash + run: git submodule update --checkout --init --depth 1 FreeRTOS/Source + + - name: Install GNU ARM Toolchain + shell: bash + run: | + sudo apt-get -y update + sudo apt-get -y install gcc-arm-none-eabi build-essential cmake git ninja-build python3-minimal + + - name: Build CORTEX_MPU_M3_MPS2_QEMU_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/CORTEX_MPU_M3_MPS2_QEMU_GCC + run: make -j + + - name: Build CORTEX_LM3S102_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/CORTEX_LM3S102_GCC + run: make -j + + - name: Build CORTEX_M3_MPS2_QEMU_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC + run: | + make clean + make -j + + - name: Build CORTEX_M3_MPS2_QEMU_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/CORTEX_M3_MPS2_QEMU_GCC + run: | + make clean + make FULL_DEMO=1 -j + + - name: Build CORTEX_LM3S811_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/CORTEX_LM3S811_GCC + run: make -j + + - name: Build CORTEX_M0+_RP2040 Demos + shell: bash + working-directory: FreeRTOS/Demo/ThirdParty/Community-Supported/CORTEX_M0+_RP2040 + run: | + git clone https://github.com/raspberrypi/pico-sdk.git + cmake -B build -DPICO_SDK_PATH=pico-sdk -GNinja + ninja -C build --verbose + + - name: Build CORTEX_MPS2_QEMU_IAR_GCC Demo + shell: bash + working-directory: FreeRTOS/Demo/CORTEX_MPS2_QEMU_IAR_GCC + run: make -C build/gcc -j From 78319fd17ed8256b93ce91251d1bdd4907940b52 Mon Sep 17 00:00:00 2001 From: Chris Copeland Date: Thu, 19 Jan 2023 14:46:42 -0800 Subject: [PATCH 05/62] Add ulTaskGetRunTimeCounter and ulTaskGetRunTimePercent (#611) Allow ulTaskGetIdleRunTimeCounter and ulTaskGetIdleRunTimePercent to be used whenever configGENERATE_RUN_TIME_STATS is enabled, as this is the only requirement for these functions to work. --- .github/lexicon.txt | 2 ++ include/task.h | 49 ++++++++++++++++++++++++++++++++++++++------- tasks.c | 34 ++++++++++++++++++++++++------- 3 files changed, 71 insertions(+), 14 deletions(-) diff --git a/.github/lexicon.txt b/.github/lexicon.txt index ac548204172..765b05c1eb5 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -2321,6 +2321,8 @@ ulstoppedtimercompensation ultablebase ultaskgetidleruntimecounter ultaskgetidleruntimepercent +ultaskgetruntimecounter +ultaskgetruntimepercent ultaskhasfpucontext ultasknotifystateclear ultasknotifytake diff --git a/include/task.h b/include/task.h index b01551d5366..f4e72c2c2f5 100644 --- a/include/task.h +++ b/include/task.h @@ -1934,6 +1934,42 @@ void vTaskList( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unquali */ void vTaskGetRunTimeStats( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +/** + * task. h + * @code{c} + * configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask ); + * configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask ); + * @endcode + * + * configGENERATE_RUN_TIME_STATS must be defined as 1 for these functions to be + * available. The application must also then provide definitions for + * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and + * portGET_RUN_TIME_COUNTER_VALUE() to configure a peripheral timer/counter and + * return the timers current count value respectively. The counter should be + * at least 10 times the frequency of the tick count. 
+ * + * Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total + * accumulated execution time being stored for each task. The resolution + * of the accumulated time value depends on the frequency of the timer + * configured by the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() macro. + * While uxTaskGetSystemState() and vTaskGetRunTimeStats() writes the total + * execution time of each task into a buffer, ulTaskGetRunTimeCounter() + * returns the total execution time of just one task and + * ulTaskGetRunTimePercent() returns the percentage of the CPU time used by + * just one task. + * + * @return The total run time of the given task or the percentage of the total + * run time consumed by the given task. This is the amount of time the task + * has actually been executing. The unit of time is dependent on the frequency + * configured using the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and + * portGET_RUN_TIME_COUNTER_VALUE() macros. + * + * \defgroup ulTaskGetRunTimeCounter ulTaskGetRunTimeCounter + * \ingroup TaskUtils + */ +configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; +configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + /** * task. h * @code{c} @@ -1941,13 +1977,12 @@ void vTaskGetRunTimeStats( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e * configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void ); * @endcode * - * configGENERATE_RUN_TIME_STATS, configUSE_STATS_FORMATTING_FUNCTIONS and - * INCLUDE_xTaskGetIdleTaskHandle must all be defined as 1 for these functions - * to be available. The application must also then provide definitions for - * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and portGET_RUN_TIME_COUNTER_VALUE() - * to configure a peripheral timer/counter and return the timers current count - * value respectively. The counter should be at least 10 times the frequency of - * the tick count. + * configGENERATE_RUN_TIME_STATS must be defined as 1 for these functions to be + * available. The application must also then provide definitions for + * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and + * portGET_RUN_TIME_COUNTER_VALUE() to configure a peripheral timer/counter and + * return the timers current count value respectively. The counter should be + * at least 10 times the frequency of the tick count. * * Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total * accumulated execution time being stored for each task. 
The resolution diff --git a/tasks.c b/tasks.c index 3a0f0470e78..ac221afe739 100644 --- a/tasks.c +++ b/tasks.c @@ -5251,19 +5251,19 @@ TickType_t uxTaskResetEventItemValue( void ) #endif /* configUSE_TASK_NOTIFICATIONS */ /*-----------------------------------------------------------*/ -#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) +#if ( configGENERATE_RUN_TIME_STATS == 1 ) - configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void ) + configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) { - return xIdleTaskHandle->ulRunTimeCounter; + return xTask->ulRunTimeCounter; } #endif /*-----------------------------------------------------------*/ -#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) +#if ( configGENERATE_RUN_TIME_STATS == 1 ) - configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void ) + configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask ) { configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn; @@ -5275,7 +5275,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* Avoid divide by zero errors. */ if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 ) { - ulReturn = xIdleTaskHandle->ulRunTimeCounter / ulTotalTime; + ulReturn = xTask->ulRunTimeCounter / ulTotalTime; } else { @@ -5285,7 +5285,27 @@ TickType_t uxTaskResetEventItemValue( void ) return ulReturn; } -#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +#endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + + configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void ) + { + return ulTaskGetRunTimeCounter( xIdleTaskHandle ); + } + +#endif +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + + configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void ) + { + return ulTaskGetRunTimePercent( xIdleTaskHandle ); + } + +#endif /*-----------------------------------------------------------*/ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, From 260a37c082f4b70f9c6f361eadb0b29f1a448e3a Mon Sep 17 00:00:00 2001 From: "David J. Fiddes" <35607151+davefiddes@users.noreply.github.com> Date: Mon, 23 Jan 2023 17:16:24 +0000 Subject: [PATCH 06/62] Fix some CMake documentation typos (#616) The quick start instructions for CMake mention the "master" git branch which has been replaced by "main" in the current repo. The main CMakeLists.txt documents how to integrate a custom port. Fix a typo in the suggested CMake code. 
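For reference, the corrected snippet that CMakeLists.txt suggests for the A_CUSTOM_PORT case follows the pattern sketched below. This is a minimal illustration of that pattern only; the port source file name is a placeholder and is not part of this change.

```cmake
# Sketch of the custom-port wiring suggested by the A_CUSTOM_PORT branch
# of CMakeLists.txt. "my_port.c" stands in for the application's own
# port sources and is a placeholder, not taken from this patch.
add_library(freertos_kernel_port STATIC
    my_port.c)

target_include_directories(freertos_kernel_port
    PUBLIC
        .)

# This is the call whose spelling ("taget_link_libraries") is corrected
# in the diff that follows.
target_link_libraries(freertos_kernel_port
    PRIVATE
        freertos_kernel)
```

The diff below only corrects that spelling inside the suggested snippet and updates the README's FetchContent example to reference the `main` branch; the surrounding structure is unchanged.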
--- CMakeLists.txt | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1d4a7799f7c..09bd4bdb84d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -217,7 +217,7 @@ elseif((FREERTOS_PORT STREQUAL "A_CUSTOM_PORT") AND (NOT TARGET freertos_kernel_ " target_include_directories(freertos_kernel_port\n" " PUBLIC\n" " .)\n" - " taget_link_libraries(freertos_kernel_port\n" + " target_link_libraries(freertos_kernel_port\n" " PRIVATE\n" " freertos_kernel)") endif() diff --git a/README.md b/README.md index 5674872421c..025a135774f 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ Add the following into your project's main or a subdirectory's `CMakeLists.txt`: ```cmake FetchContent_Declare( freertos_kernel GIT_REPOSITORY https://github.com/FreeRTOS/FreeRTOS-Kernel.git - GIT_TAG master #Note: Best practice to use specific git-hash or tagged version + GIT_TAG main #Note: Best practice to use specific git-hash or tagged version ) ``` From 91c20f5f425b18b641765217586a82a62e1aa45a Mon Sep 17 00:00:00 2001 From: Dusan Cervenka Date: Fri, 3 Feb 2023 15:34:02 +0100 Subject: [PATCH 07/62] Added support of 64bit events. (#597) * Added support of 64bit even Signed-off-by: Cervenka Dusan * Added missing brackets Signed-off-by: Cervenka Dusan * Made proper name for tick macro. Signed-off-by: Cervenka Dusan * Improved macro evaluation Signed-off-by: Cervenka Dusan * Fixed missed port files + documentation Signed-off-by: Cervenka Dusan * Changes made on PR Signed-off-by: Cervenka Dusan * Fix macro definition. Signed-off-by: Cervenka Dusan * Formatted code with uncrustify Signed-off-by: Cervenka Dusan --------- Signed-off-by: Cervenka Dusan --- event_groups.c | 11 +++++-- include/FreeRTOS.h | 29 +++++++++++++++++-- include/event_groups.h | 26 +++++++++-------- include/projdefs.h | 8 +++-- portable/ARMv8M/non_secure/portmacrocommon.h | 6 ++-- portable/BCC/16BitDOS/Flsh186/prtmacro.h | 6 ++-- portable/BCC/16BitDOS/PC/prtmacro.h | 6 ++-- portable/CCS/ARM_CM3/portmacro.h | 6 ++-- portable/CCS/ARM_CM4F/portmacro.h | 6 ++-- portable/CCS/ARM_Cortex-R4/portmacro.h | 6 ++-- portable/CCS/MSP430X/portmacro.h | 8 +++-- portable/CodeWarrior/ColdFire_V1/portmacro.h | 8 +++-- portable/CodeWarrior/ColdFire_V2/portmacro.h | 8 +++-- portable/CodeWarrior/HCS12/portmacro.h | 8 +++-- portable/GCC/ARM7_AT91FR40008/portmacro.h | 8 +++-- portable/GCC/ARM7_AT91SAM7S/portmacro.h | 8 +++-- portable/GCC/ARM7_LPC2000/portmacro.h | 8 +++-- portable/GCC/ARM7_LPC23xx/portmacro.h | 8 +++-- portable/GCC/ARM_CM0/portmacro.h | 6 ++-- .../GCC/ARM_CM23/non_secure/portmacrocommon.h | 6 ++-- .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 6 ++-- portable/GCC/ARM_CM3/portmacro.h | 6 ++-- .../GCC/ARM_CM33/non_secure/portmacrocommon.h | 6 ++-- .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 6 ++-- portable/GCC/ARM_CM3_MPU/portmacro.h | 6 ++-- portable/GCC/ARM_CM4F/portmacro.h | 9 ++++-- portable/GCC/ARM_CM4_MPU/portmacro.h | 6 ++-- .../GCC/ARM_CM55/non_secure/portmacrocommon.h | 6 ++-- .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 6 ++-- portable/GCC/ARM_CM7/r0p1/portmacro.h | 6 ++-- .../GCC/ARM_CM85/non_secure/portmacrocommon.h | 6 ++-- .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 6 ++-- portable/GCC/ATMega323/portmacro.h | 8 +++-- portable/GCC/AVR32_UC3/portmacro.h | 8 +++-- portable/GCC/CORTUS_APS3/portmacro.h | 8 +++-- portable/GCC/ColdFire_V2/portmacro.h | 8 +++-- portable/GCC/H8S2329/portmacro.h | 8 +++-- portable/GCC/HCS12/portmacro.h | 8 +++-- 
portable/GCC/MSP430F449/portmacro.h | 8 +++-- portable/GCC/MicroBlaze/portmacro.h | 6 ++-- portable/GCC/MicroBlazeV8/portmacro.h | 6 ++-- portable/GCC/MicroBlazeV9/portmacro.h | 6 ++-- portable/GCC/NiosII/portmacro.h | 6 ++-- portable/GCC/PPC405_Xilinx/portmacro.h | 8 +++-- portable/GCC/PPC440_Xilinx/portmacro.h | 8 +++-- portable/GCC/RL78/portmacro.h | 8 +++-- portable/GCC/RX100/portmacro.h | 6 ++-- portable/GCC/RX200/portmacro.h | 6 ++-- portable/GCC/RX600/portmacro.h | 6 ++-- portable/GCC/RX600v2/portmacro.h | 6 ++-- portable/GCC/RX700v3_DPFPU/portmacro.h | 6 ++-- portable/GCC/STR75x/portmacro.h | 8 +++-- portable/GCC/TriCore_1782/portmacro.h | 6 ++-- portable/IAR/78K0R/portmacro.h | 8 +++-- portable/IAR/ARM_CM0/portmacro.h | 6 ++-- .../IAR/ARM_CM23/non_secure/portmacrocommon.h | 6 ++-- .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 6 ++-- portable/IAR/ARM_CM3/portmacro.h | 6 ++-- .../IAR/ARM_CM33/non_secure/portmacrocommon.h | 6 ++-- .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 6 ++-- portable/IAR/ARM_CM4F/portmacro.h | 6 ++-- portable/IAR/ARM_CM4F_MPU/portmacro.h | 6 ++-- .../IAR/ARM_CM55/non_secure/portmacrocommon.h | 6 ++-- .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 6 ++-- portable/IAR/ARM_CM7/r0p1/portmacro.h | 6 ++-- .../IAR/ARM_CM85/non_secure/portmacrocommon.h | 6 ++-- .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 6 ++-- portable/IAR/ATMega323/portmacro.h | 8 +++-- portable/IAR/AVR32_UC3/portmacro.h | 8 +++-- portable/IAR/AVR_AVRDx/portmacro.h | 8 +++-- portable/IAR/AVR_Mega0/portmacro.h | 8 +++-- portable/IAR/AtmelSAM7S64/portmacro.h | 8 +++-- portable/IAR/AtmelSAM9XE/portmacro.h | 8 +++-- portable/IAR/LPC2000/portmacro.h | 8 +++-- portable/IAR/MSP430/portmacro.h | 8 +++-- portable/IAR/MSP430X/portmacro.h | 8 +++-- portable/IAR/RL78/portmacro.h | 8 +++-- portable/IAR/RX100/portmacro.h | 6 ++-- portable/IAR/RX600/portmacro.h | 6 ++-- portable/IAR/RX700v3_DPFPU/portmacro.h | 6 ++-- portable/IAR/RXv2/portmacro.h | 6 ++-- portable/IAR/STR71x/portmacro.h | 8 +++-- portable/IAR/STR75x/portmacro.h | 8 +++-- portable/IAR/STR91x/portmacro.h | 8 +++-- portable/IAR/V850ES/portmacro.h | 8 +++-- portable/MPLAB/PIC18F/portmacro.h | 8 +++-- portable/MPLAB/PIC24_dsPIC/portmacro.h | 8 +++-- portable/MPLAB/PIC32MEC14xx/portmacro.h | 8 +++-- portable/MPLAB/PIC32MX/portmacro.h | 6 ++-- portable/MPLAB/PIC32MZ/portmacro.h | 6 ++-- portable/MSVC-MingW/portmacro.h | 6 ++-- portable/MikroC/ARM_CM4F/portmacro.h | 6 ++-- .../Tern_EE/large_untested/portmacro.h | 8 +++-- portable/Paradigm/Tern_EE/small/portmacro.h | 8 +++-- portable/RVDS/ARM7_LPC21xx/portmacro.h | 8 +++-- portable/RVDS/ARM_CA9/portmacro.h | 6 ++-- portable/RVDS/ARM_CM0/portmacro.h | 6 ++-- portable/RVDS/ARM_CM3/portmacro.h | 6 ++-- portable/RVDS/ARM_CM4F/portmacro.h | 6 ++-- portable/RVDS/ARM_CM4_MPU/portmacro.h | 6 ++-- portable/RVDS/ARM_CM7/r0p1/portmacro.h | 6 ++-- portable/Renesas/RX100/portmacro.h | 6 ++-- portable/Renesas/RX200/portmacro.h | 6 ++-- portable/Renesas/RX600/portmacro.h | 6 ++-- portable/Renesas/RX600v2/portmacro.h | 6 ++-- portable/Renesas/RX700v3_DPFPU/portmacro.h | 6 ++-- portable/Renesas/SH2A_FPU/portmacro.h | 4 +-- portable/Rowley/MSP430F449/portmacro.h | 8 +++-- portable/SDCC/Cygnal/portmacro.h | 8 +++-- portable/Softune/MB91460/portmacro.h | 8 +++-- portable/Softune/MB96340/portmacro.h | 8 +++-- portable/Tasking/ARM_CM4F/portmacro.h | 6 ++-- .../ThirdParty/CDK/T-HEAD_CK802/portmacro.h | 8 +++-- portable/ThirdParty/GCC/ARC_EM_HS/portmacro.h | 8 +++-- portable/ThirdParty/GCC/ARC_v1/portmacro.h | 8 +++-- 
portable/ThirdParty/GCC/ATmega/portmacro.h | 8 +++-- .../ThirdParty/GCC/RP2040/include/portmacro.h | 6 ++-- portable/ThirdParty/GCC/RP2040/port.c | 12 ++++---- .../GCC/Xtensa_ESP32/include/portmacro.h | 6 ++-- portable/ThirdParty/XCC/Xtensa/portmacro.h | 6 ++-- portable/WizC/PIC18/portmacro.h | 6 ++-- portable/oWatcom/16BitDOS/Flsh186/portmacro.h | 8 +++-- portable/oWatcom/16BitDOS/PC/portmacro.h | 8 +++-- tasks.c | 6 ++-- 124 files changed, 588 insertions(+), 313 deletions(-) diff --git a/event_groups.c b/event_groups.c index c5a2aa080a5..a64c5ead34c 100644 --- a/event_groups.c +++ b/event_groups.c @@ -49,17 +49,22 @@ /* The following bit fields convey control information in a task's event list * item value. It is important they don't clash with the * taskEVENT_LIST_ITEM_VALUE_IN_USE definition. */ -#if configUSE_16_BIT_TICKS == 1 +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x0100U #define eventUNBLOCKED_DUE_TO_BIT_SET 0x0200U #define eventWAIT_FOR_ALL_BITS 0x0400U #define eventEVENT_BITS_CONTROL_BYTES 0xff00U -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x01000000UL #define eventUNBLOCKED_DUE_TO_BIT_SET 0x02000000UL #define eventWAIT_FOR_ALL_BITS 0x04000000UL #define eventEVENT_BITS_CONTROL_BYTES 0xff000000UL -#endif +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS ) + #define eventCLEAR_EVENTS_ON_EXIT_BIT 0x0100000000000000ULL + #define eventUNBLOCKED_DUE_TO_BIT_SET 0x0200000000000000ULL + #define eventWAIT_FOR_ALL_BITS 0x0400000000000000ULL + #define eventEVENT_BITS_CONTROL_BYTES 0xff00000000000000ULL +#endif /* if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) */ typedef struct EventGroupDef_t { diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index e7d779a7102..790a06db058 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -55,6 +55,11 @@ #endif /* *INDENT-ON* */ +/* Acceptable values for configTICK_TYPE_WIDTH_IN_BITS. */ +#define TICK_TYPE_WIDTH_16_BITS 0 +#define TICK_TYPE_WIDTH_32_BITS 1 +#define TICK_TYPE_WIDTH_64_BITS 2 + /* Application specific configuration options. */ #include "FreeRTOSConfig.h" @@ -155,8 +160,28 @@ #error Missing definition: configUSE_TICK_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. #endif -#ifndef configUSE_16_BIT_TICKS - #error Missing definition: configUSE_16_BIT_TICKS must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. +#if !defined( configUSE_16_BIT_TICKS ) && !defined( configTICK_TYPE_WIDTH_IN_BITS ) + #error Missing definition: One of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. +#endif + +#if defined( configUSE_16_BIT_TICKS ) && defined( configTICK_TYPE_WIDTH_IN_BITS ) + #error Only one of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. +#endif + +/* Define configTICK_TYPE_WIDTH_IN_BITS according to the + * value of configUSE_16_BIT_TICKS for backward compatibility. 
*/ +#ifndef configTICK_TYPE_WIDTH_IN_BITS + #if ( configUSE_16_BIT_TICKS == 1 ) + #define configTICK_TYPE_WIDTH_IN_BITS TICK_TYPE_WIDTH_16_BITS + #else + #define configTICK_TYPE_WIDTH_IN_BITS TICK_TYPE_WIDTH_32_BITS + #endif +#endif + +#if ( ( configTICK_TYPE_WIDTH_IN_BITS != TICK_TYPE_WIDTH_16_BITS ) && \ + ( configTICK_TYPE_WIDTH_IN_BITS != TICK_TYPE_WIDTH_32_BITS ) && \ + ( configTICK_TYPE_WIDTH_IN_BITS != TICK_TYPE_WIDTH_64_BITS ) ) + #error Macro configTICK_TYPE_WIDTH_IN_BITS is defined to incorrect value. See the Configuration section of the FreeRTOS API documentation for details. #endif #ifndef INCLUDE_vTaskPrioritySet diff --git a/include/event_groups.h b/include/event_groups.h index 949ddd9143f..3f37d90be5d 100644 --- a/include/event_groups.h +++ b/include/event_groups.h @@ -84,8 +84,8 @@ typedef struct EventGroupDef_t * EventGroupHandle_t; /* * The type that holds event bits always matches TickType_t - therefore the - * number of bits it holds is set by configUSE_16_BIT_TICKS (16 bits if set to 1, - * 32 bits if set to 0. + * number of bits it holds is set by configTICK_TYPE_WIDTH_IN_BITS (16 bits if set to 0, + * 32 bits if set to 1, 64 bits if set to 2. * * \defgroup EventBits_t EventBits_t * \ingroup EventGroup @@ -112,11 +112,12 @@ typedef TickType_t EventBits_t; * * Although event groups are not related to ticks, for internal implementation * reasons the number of bits available for use in an event group is dependent - * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h. If - * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit - * 0 to bit 7). If configUSE_16_BIT_TICKS is set to 0 then each event group has - * 24 usable bits (bit 0 to bit 23). The EventBits_t type is used to store - * event bits within an event group. + * on the configTICK_TYPE_WIDTH_IN_BITS setting in FreeRTOSConfig.h. If + * configTICK_TYPE_WIDTH_IN_BITS is 0 then each event group contains 8 usable bits (bit + * 0 to bit 7). If configTICK_TYPE_WIDTH_IN_BITS is set to 1 then each event group has + * 24 usable bits (bit 0 to bit 23). If configTICK_TYPE_WIDTH_IN_BITS is set to 2 then + * each event group has 56 usable bits (bit 0 to bit 53). The EventBits_t type + * is used to store event bits within an event group. * * @return If the event group was created then a handle to the event group is * returned. If there was insufficient FreeRTOS heap available to create the @@ -168,11 +169,12 @@ typedef TickType_t EventBits_t; * * Although event groups are not related to ticks, for internal implementation * reasons the number of bits available for use in an event group is dependent - * on the configUSE_16_BIT_TICKS setting in FreeRTOSConfig.h. If - * configUSE_16_BIT_TICKS is 1 then each event group contains 8 usable bits (bit - * 0 to bit 7). If configUSE_16_BIT_TICKS is set to 0 then each event group has - * 24 usable bits (bit 0 to bit 23). The EventBits_t type is used to store - * event bits within an event group. + * on the configTICK_TYPE_WIDTH_IN_BITS setting in FreeRTOSConfig.h. If + * configTICK_TYPE_WIDTH_IN_BITS is 0 then each event group contains 8 usable bits (bit + * 0 to bit 7). If configTICK_TYPE_WIDTH_IN_BITS is set to 1 then each event group has + * 24 usable bits (bit 0 to bit 23). If configTICK_TYPE_WIDTH_IN_BITS is set to 2 then + * each event group has 56 usable bits (bit 0 to bit 53). The EventBits_t type + * is used to store event bits within an event group. 
* * @param pxEventGroupBuffer pxEventGroupBuffer must point to a variable of type * StaticEventGroup_t, which will be then be used to hold the event group's data diff --git a/include/projdefs.h b/include/projdefs.h index b4b8c14ff79..67fc5351937 100644 --- a/include/projdefs.h +++ b/include/projdefs.h @@ -60,10 +60,14 @@ typedef void (* TaskFunction_t)( void * ); #define configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES 0 #endif -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) #define pdINTEGRITY_CHECK_VALUE 0x5a5a -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) #define pdINTEGRITY_CHECK_VALUE 0x5a5a5a5aUL +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS ) + #define pdINTEGRITY_CHECK_VALUE 0x5a5a5a5a5a5a5a5aULL +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /* The following errno values are used by FreeRTOS+ components, not FreeRTOS diff --git a/portable/ARMv8M/non_secure/portmacrocommon.h b/portable/ARMv8M/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/ARMv8M/non_secure/portmacrocommon.h +++ b/portable/ARMv8M/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/BCC/16BitDOS/Flsh186/prtmacro.h b/portable/BCC/16BitDOS/Flsh186/prtmacro.h index 5aaebaa4d8b..295c0bc736e 100644 --- a/portable/BCC/16BitDOS/Flsh186/prtmacro.h +++ b/portable/BCC/16BitDOS/Flsh186/prtmacro.h @@ -52,12 +52,14 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/BCC/16BitDOS/PC/prtmacro.h b/portable/BCC/16BitDOS/PC/prtmacro.h index 1d26c5bf416..5fb4ed6a497 100644 --- a/portable/BCC/16BitDOS/PC/prtmacro.h +++ b/portable/BCC/16BitDOS/PC/prtmacro.h @@ -52,12 +52,14 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/CCS/ARM_CM3/portmacro.h b/portable/CCS/ARM_CM3/portmacro.h index 5b5f4fbac07..7ac1d709caf 100644 --- a/portable/CCS/ARM_CM3/portmacro.h +++ b/portable/CCS/ARM_CM3/portmacro.h @@ -57,16 +57,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/CCS/ARM_CM4F/portmacro.h b/portable/CCS/ARM_CM4F/portmacro.h index 9e78588ad94..34988c223f5 100644 --- a/portable/CCS/ARM_CM4F/portmacro.h +++ b/portable/CCS/ARM_CM4F/portmacro.h @@ -57,16 +57,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/CCS/ARM_Cortex-R4/portmacro.h b/portable/CCS/ARM_Cortex-R4/portmacro.h index 5d9e91d66a9..07c1827cb7a 100644 --- a/portable/CCS/ARM_Cortex-R4/portmacro.h +++ b/portable/CCS/ARM_Cortex-R4/portmacro.h @@ -52,16 +52,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if (configUSE_16_BIT_TICKS == 1) +#if (configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS) typedef uint16_t TickType_t; #define portMAX_DELAY (TickType_t) 0xFFFF -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY (TickType_t) 0xFFFFFFFFF /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif diff --git a/portable/CCS/MSP430X/portmacro.h b/portable/CCS/MSP430X/portmacro.h index 1d651c19410..064b0503ae4 100644 --- a/portable/CCS/MSP430X/portmacro.h +++ b/portable/CCS/MSP430X/portmacro.h @@ -62,12 +62,14 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/CodeWarrior/ColdFire_V1/portmacro.h b/portable/CodeWarrior/ColdFire_V1/portmacro.h index eca83debbab..edae69ab2b2 100644 --- a/portable/CodeWarrior/ColdFire_V1/portmacro.h +++ b/portable/CodeWarrior/ColdFire_V1/portmacro.h @@ -57,12 +57,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/CodeWarrior/ColdFire_V2/portmacro.h b/portable/CodeWarrior/ColdFire_V2/portmacro.h index 634d7404202..665ae4b304c 100644 --- a/portable/CodeWarrior/ColdFire_V2/portmacro.h +++ b/portable/CodeWarrior/ColdFire_V2/portmacro.h @@ -56,12 +56,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/CodeWarrior/HCS12/portmacro.h b/portable/CodeWarrior/HCS12/portmacro.h index 2fd14ae4e01..d0d0a140eb4 100644 --- a/portable/CodeWarrior/HCS12/portmacro.h +++ b/portable/CodeWarrior/HCS12/portmacro.h @@ -53,12 +53,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM7_AT91FR40008/portmacro.h b/portable/GCC/ARM7_AT91FR40008/portmacro.h index 8a1666e1319..69ee70f078c 100644 --- a/portable/GCC/ARM7_AT91FR40008/portmacro.h +++ b/portable/GCC/ARM7_AT91FR40008/portmacro.h @@ -79,12 +79,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM7_AT91SAM7S/portmacro.h b/portable/GCC/ARM7_AT91SAM7S/portmacro.h index 4f514258805..1a440a68c5f 100644 --- a/portable/GCC/ARM7_AT91SAM7S/portmacro.h +++ b/portable/GCC/ARM7_AT91SAM7S/portmacro.h @@ -79,12 +79,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM7_LPC2000/portmacro.h b/portable/GCC/ARM7_LPC2000/portmacro.h index 84029962b58..50922065441 100644 --- a/portable/GCC/ARM7_LPC2000/portmacro.h +++ b/portable/GCC/ARM7_LPC2000/portmacro.h @@ -56,12 +56,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM7_LPC23xx/portmacro.h b/portable/GCC/ARM7_LPC23xx/portmacro.h index c69003ca8d0..768e86dbed1 100644 --- a/portable/GCC/ARM7_LPC23xx/portmacro.h +++ b/portable/GCC/ARM7_LPC23xx/portmacro.h @@ -79,12 +79,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM0/portmacro.h b/portable/GCC/ARM_CM0/portmacro.h index a89c8ba42d0..6d60fce4c90 100644 --- a/portable/GCC/ARM_CM0/portmacro.h +++ b/portable/GCC/ARM_CM0/portmacro.h @@ -57,16 +57,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM3/portmacro.h b/portable/GCC/ARM_CM3/portmacro.h index a7b45a5867a..c1f7b899217 100644 --- a/portable/GCC/ARM_CM3/portmacro.h +++ b/portable/GCC/ARM_CM3/portmacro.h @@ -57,16 +57,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM3_MPU/portmacro.h b/portable/GCC/ARM_CM3_MPU/portmacro.h index 693fc7b9091..d3b3af58f4c 100644 --- a/portable/GCC/ARM_CM3_MPU/portmacro.h +++ b/portable/GCC/ARM_CM3_MPU/portmacro.h @@ -57,16 +57,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM4F/portmacro.h b/portable/GCC/ARM_CM4F/portmacro.h index 0ab47e05b86..1ffdc5b8b93 100644 --- a/portable/GCC/ARM_CM4F/portmacro.h +++ b/portable/GCC/ARM_CM4F/portmacro.h @@ -57,16 +57,21 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS ) + typedef uint64_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffffffffffULL + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM4_MPU/portmacro.h b/portable/GCC/ARM_CM4_MPU/portmacro.h index 462075854ce..f3365d14371 100644 --- a/portable/GCC/ARM_CM4_MPU/portmacro.h +++ b/portable/GCC/ARM_CM4_MPU/portmacro.h @@ -60,16 +60,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM7/r0p1/portmacro.h b/portable/GCC/ARM_CM7/r0p1/portmacro.h index 214dc2bffb5..d65a4c24932 100644 --- a/portable/GCC/ARM_CM7/r0p1/portmacro.h +++ b/portable/GCC/ARM_CM7/r0p1/portmacro.h @@ -57,16 +57,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ATMega323/portmacro.h b/portable/GCC/ATMega323/portmacro.h index 7afdef9075c..c22706de5d4 100644 --- a/portable/GCC/ATMega323/portmacro.h +++ b/portable/GCC/ATMega323/portmacro.h @@ -65,12 +65,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/AVR32_UC3/portmacro.h b/portable/GCC/AVR32_UC3/portmacro.h index 2ebc711402a..86014a135b5 100644 --- a/portable/GCC/AVR32_UC3/portmacro.h +++ b/portable/GCC/AVR32_UC3/portmacro.h @@ -109,12 +109,14 @@ typedef unsigned long UBaseType_t; #define configTICK_TC_IRQ ATPASTE2(AVR32_TC_IRQ, configTICK_TC_CHANNEL) -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/CORTUS_APS3/portmacro.h b/portable/GCC/CORTUS_APS3/portmacro.h index 486db878961..86000665681 100644 --- a/portable/GCC/CORTUS_APS3/portmacro.h +++ b/portable/GCC/CORTUS_APS3/portmacro.h @@ -58,12 +58,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ColdFire_V2/portmacro.h b/portable/GCC/ColdFire_V2/portmacro.h index 8792cd96d18..9b4c1da8833 100644 --- a/portable/GCC/ColdFire_V2/portmacro.h +++ b/portable/GCC/ColdFire_V2/portmacro.h @@ -56,12 +56,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/H8S2329/portmacro.h b/portable/GCC/H8S2329/portmacro.h index f568853fc4e..4c87abd163c 100644 --- a/portable/GCC/H8S2329/portmacro.h +++ b/portable/GCC/H8S2329/portmacro.h @@ -57,12 +57,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/HCS12/portmacro.h b/portable/GCC/HCS12/portmacro.h index 1a458f9ae66..9202510ccc8 100644 --- a/portable/GCC/HCS12/portmacro.h +++ b/portable/GCC/HCS12/portmacro.h @@ -58,12 +58,14 @@ typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/MSP430F449/portmacro.h b/portable/GCC/MSP430F449/portmacro.h index 149de12178b..8d6827a57e9 100644 --- a/portable/GCC/MSP430F449/portmacro.h +++ b/portable/GCC/MSP430F449/portmacro.h @@ -56,12 +56,14 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/MicroBlaze/portmacro.h b/portable/GCC/MicroBlaze/portmacro.h index 5bc52ffe26c..7d96d5c9004 100644 --- a/portable/GCC/MicroBlaze/portmacro.h +++ b/portable/GCC/MicroBlaze/portmacro.h @@ -56,16 +56,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/MicroBlazeV8/portmacro.h b/portable/GCC/MicroBlazeV8/portmacro.h index 28e54017265..a4237336f2e 100644 --- a/portable/GCC/MicroBlazeV8/portmacro.h +++ b/portable/GCC/MicroBlazeV8/portmacro.h @@ -60,16 +60,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. 
*/ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/MicroBlazeV9/portmacro.h b/portable/GCC/MicroBlazeV9/portmacro.h index f41205e3b93..84b358f5b5a 100644 --- a/portable/GCC/MicroBlazeV9/portmacro.h +++ b/portable/GCC/MicroBlazeV9/portmacro.h @@ -60,16 +60,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/NiosII/portmacro.h b/portable/GCC/NiosII/portmacro.h index fb482ffb978..7a159925db2 100644 --- a/portable/GCC/NiosII/portmacro.h +++ b/portable/GCC/NiosII/portmacro.h @@ -58,16 +58,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/PPC405_Xilinx/portmacro.h b/portable/GCC/PPC405_Xilinx/portmacro.h index eaad8fe7b75..d7f32944c40 100644 --- a/portable/GCC/PPC405_Xilinx/portmacro.h +++ b/portable/GCC/PPC405_Xilinx/portmacro.h @@ -58,12 +58,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/PPC440_Xilinx/portmacro.h b/portable/GCC/PPC440_Xilinx/portmacro.h index eaad8fe7b75..d7f32944c40 100644 --- a/portable/GCC/PPC440_Xilinx/portmacro.h +++ b/portable/GCC/PPC440_Xilinx/portmacro.h @@ -58,12 +58,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RL78/portmacro.h b/portable/GCC/RL78/portmacro.h index 4b3cc497235..8108f1c1221 100644 --- a/portable/GCC/RL78/portmacro.h +++ b/portable/GCC/RL78/portmacro.h @@ -54,12 +54,14 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX100/portmacro.h b/portable/GCC/RX100/portmacro.h index 842754f1f1e..863596685aa 100644 --- a/portable/GCC/RX100/portmacro.h +++ b/portable/GCC/RX100/portmacro.h @@ -63,16 +63,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX200/portmacro.h b/portable/GCC/RX200/portmacro.h index 7c3fefc253c..80fc8c8041d 100644 --- a/portable/GCC/RX200/portmacro.h +++ b/portable/GCC/RX200/portmacro.h @@ -64,16 +64,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. 
*/ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX600/portmacro.h b/portable/GCC/RX600/portmacro.h index 5919edfe081..7e1fd1e0c92 100644 --- a/portable/GCC/RX600/portmacro.h +++ b/portable/GCC/RX600/portmacro.h @@ -64,16 +64,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX600v2/portmacro.h b/portable/GCC/RX600v2/portmacro.h index 5919edfe081..7e1fd1e0c92 100644 --- a/portable/GCC/RX600v2/portmacro.h +++ b/portable/GCC/RX600v2/portmacro.h @@ -64,16 +64,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/RX700v3_DPFPU/portmacro.h b/portable/GCC/RX700v3_DPFPU/portmacro.h index 75d405fb8a1..6b76374c882 100644 --- a/portable/GCC/RX700v3_DPFPU/portmacro.h +++ b/portable/GCC/RX700v3_DPFPU/portmacro.h @@ -76,16 +76,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/STR75x/portmacro.h b/portable/GCC/STR75x/portmacro.h index b7fbe669305..85262dac4d5 100644 --- a/portable/GCC/STR75x/portmacro.h +++ b/portable/GCC/STR75x/portmacro.h @@ -57,12 +57,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/GCC/TriCore_1782/portmacro.h b/portable/GCC/TriCore_1782/portmacro.h index 4e51954afcc..734abc5949c 100644 --- a/portable/GCC/TriCore_1782/portmacro.h +++ b/portable/GCC/TriCore_1782/portmacro.h @@ -60,16 +60,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*---------------------------------------------------------------------------*/ diff --git a/portable/IAR/78K0R/portmacro.h b/portable/IAR/78K0R/portmacro.h index 673cf156c4f..e6f67cdd3b7 100644 --- a/portable/IAR/78K0R/portmacro.h +++ b/portable/IAR/78K0R/portmacro.h @@ -57,12 +57,14 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if (configUSE_16_BIT_TICKS==1) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef unsigned int TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM0/portmacro.h b/portable/IAR/ARM_CM0/portmacro.h index 00561582349..ce1aa08fc9d 100644 --- a/portable/IAR/ARM_CM0/portmacro.h +++ b/portable/IAR/ARM_CM0/portmacro.h @@ -57,16 +57,18 @@ typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. 
*/ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM3/portmacro.h b/portable/IAR/ARM_CM3/portmacro.h index 3825a7c5317..c334978ea73 100644 --- a/portable/IAR/ARM_CM3/portmacro.h +++ b/portable/IAR/ARM_CM3/portmacro.h @@ -60,16 +60,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM4F/portmacro.h b/portable/IAR/ARM_CM4F/portmacro.h index e6c8726f5f8..148c81d14ee 100644 --- a/portable/IAR/ARM_CM4F/portmacro.h +++ b/portable/IAR/ARM_CM4F/portmacro.h @@ -59,16 +59,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM4F_MPU/portmacro.h b/portable/IAR/ARM_CM4F_MPU/portmacro.h index 1e44234fae4..96787e7c31f 100644 --- a/portable/IAR/ARM_CM4F_MPU/portmacro.h +++ b/portable/IAR/ARM_CM4F_MPU/portmacro.h @@ -62,16 +62,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM7/r0p1/portmacro.h b/portable/IAR/ARM_CM7/r0p1/portmacro.h index 813cc545a4c..08867c289c3 100644 --- a/portable/IAR/ARM_CM7/r0p1/portmacro.h +++ b/portable/IAR/ARM_CM7/r0p1/portmacro.h @@ -59,16 +59,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h index 51e21ea8ddd..ca7e9225c05 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -72,16 +72,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ATMega323/portmacro.h b/portable/IAR/ATMega323/portmacro.h index 973d6d9cac9..111f4d02667 100644 --- a/portable/IAR/ATMega323/portmacro.h +++ b/portable/IAR/ATMega323/portmacro.h @@ -64,12 +64,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/AVR32_UC3/portmacro.h b/portable/IAR/AVR32_UC3/portmacro.h index f2d02e2fa7e..eb48b59b589 100644 --- a/portable/IAR/AVR32_UC3/portmacro.h +++ b/portable/IAR/AVR32_UC3/portmacro.h @@ -112,12 +112,14 @@ typedef unsigned long UBaseType_t; #define configTICK_TC_IRQ ATPASTE2(AVR32_TC_IRQ, configTICK_TC_CHANNEL) -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/AVR_AVRDx/portmacro.h b/portable/IAR/AVR_AVRDx/portmacro.h index d02c691bd86..5b4b5be8b3c 100644 --- a/portable/IAR/AVR_AVRDx/portmacro.h +++ b/portable/IAR/AVR_AVRDx/portmacro.h @@ -60,12 +60,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/AVR_Mega0/portmacro.h b/portable/IAR/AVR_Mega0/portmacro.h index d02c691bd86..5b4b5be8b3c 100644 --- a/portable/IAR/AVR_Mega0/portmacro.h +++ b/portable/IAR/AVR_Mega0/portmacro.h @@ -60,12 +60,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/AtmelSAM7S64/portmacro.h b/portable/IAR/AtmelSAM7S64/portmacro.h index 3c651b0398e..173d3fbaa2d 100644 --- a/portable/IAR/AtmelSAM7S64/portmacro.h +++ b/portable/IAR/AtmelSAM7S64/portmacro.h @@ -57,12 +57,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/AtmelSAM9XE/portmacro.h b/portable/IAR/AtmelSAM9XE/portmacro.h index 623a7b060db..3d1d82ec17b 100644 --- a/portable/IAR/AtmelSAM9XE/portmacro.h +++ b/portable/IAR/AtmelSAM9XE/portmacro.h @@ -60,12 +60,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/LPC2000/portmacro.h b/portable/IAR/LPC2000/portmacro.h index e7fd1097b78..f41d8b3445e 100644 --- a/portable/IAR/LPC2000/portmacro.h +++ b/portable/IAR/LPC2000/portmacro.h @@ -60,12 +60,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/MSP430/portmacro.h b/portable/IAR/MSP430/portmacro.h index 8659c4f02fa..298307f5519 100644 --- a/portable/IAR/MSP430/portmacro.h +++ b/portable/IAR/MSP430/portmacro.h @@ -53,12 +53,14 @@ typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/MSP430X/portmacro.h b/portable/IAR/MSP430X/portmacro.h index d7d60480456..1ab4665b9bf 100644 --- a/portable/IAR/MSP430X/portmacro.h +++ b/portable/IAR/MSP430X/portmacro.h @@ -62,12 +62,14 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RL78/portmacro.h b/portable/IAR/RL78/portmacro.h index 7467c33f18a..9c74a3f0452 100644 --- a/portable/IAR/RL78/portmacro.h +++ b/portable/IAR/RL78/portmacro.h @@ -75,12 +75,14 @@ typedef unsigned short UBaseType_t; #endif -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef unsigned int TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RX100/portmacro.h b/portable/IAR/RX100/portmacro.h index ff1dc0eb92d..d408a1389a5 100644 --- a/portable/IAR/RX100/portmacro.h +++ b/portable/IAR/RX100/portmacro.h @@ -64,16 +64,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RX600/portmacro.h b/portable/IAR/RX600/portmacro.h index e80f52a63ca..e5decdaf34a 100644 --- a/portable/IAR/RX600/portmacro.h +++ b/portable/IAR/RX600/portmacro.h @@ -61,16 +61,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RX700v3_DPFPU/portmacro.h b/portable/IAR/RX700v3_DPFPU/portmacro.h index 8538b152b49..e0204fa0c3c 100644 --- a/portable/IAR/RX700v3_DPFPU/portmacro.h +++ b/portable/IAR/RX700v3_DPFPU/portmacro.h @@ -79,16 +79,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/RXv2/portmacro.h b/portable/IAR/RXv2/portmacro.h index 07444eb35ae..984a4fb50e2 100644 --- a/portable/IAR/RXv2/portmacro.h +++ b/portable/IAR/RXv2/portmacro.h @@ -61,16 +61,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/STR71x/portmacro.h b/portable/IAR/STR71x/portmacro.h index 492bfb2ae8b..119eec84786 100644 --- a/portable/IAR/STR71x/portmacro.h +++ b/portable/IAR/STR71x/portmacro.h @@ -61,12 +61,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/STR75x/portmacro.h b/portable/IAR/STR75x/portmacro.h index b852c644120..674505d3f0c 100644 --- a/portable/IAR/STR75x/portmacro.h +++ b/portable/IAR/STR75x/portmacro.h @@ -60,12 +60,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/STR91x/portmacro.h b/portable/IAR/STR91x/portmacro.h index a8b24da878f..43ea6d7e8dc 100644 --- a/portable/IAR/STR91x/portmacro.h +++ b/portable/IAR/STR91x/portmacro.h @@ -60,12 +60,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/IAR/V850ES/portmacro.h b/portable/IAR/V850ES/portmacro.h index 0381f024471..76b1186761d 100644 --- a/portable/IAR/V850ES/portmacro.h +++ b/portable/IAR/V850ES/portmacro.h @@ -57,12 +57,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if (configUSE_16_BIT_TICKS==1) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/MPLAB/PIC18F/portmacro.h b/portable/MPLAB/PIC18F/portmacro.h index 9b1e44e6a8a..6fef1afe2dd 100644 --- a/portable/MPLAB/PIC18F/portmacro.h +++ b/portable/MPLAB/PIC18F/portmacro.h @@ -52,12 +52,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/MPLAB/PIC24_dsPIC/portmacro.h b/portable/MPLAB/PIC24_dsPIC/portmacro.h index 2a6f44b18ec..d05317e6f8c 100644 --- a/portable/MPLAB/PIC24_dsPIC/portmacro.h +++ b/portable/MPLAB/PIC24_dsPIC/portmacro.h @@ -56,15 +56,17 @@ typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff /* 16-bit tick type on a 16-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. 
*/ #define portTICK_TYPE_IS_ATOMIC 1 +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/MPLAB/PIC32MEC14xx/portmacro.h b/portable/MPLAB/PIC32MEC14xx/portmacro.h index dad5aa3530f..d9aad5df0aa 100644 --- a/portable/MPLAB/PIC32MEC14xx/portmacro.h +++ b/portable/MPLAB/PIC32MEC14xx/portmacro.h @@ -56,12 +56,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/MPLAB/PIC32MX/portmacro.h b/portable/MPLAB/PIC32MX/portmacro.h index a81dbf341d6..e2a1078e631 100644 --- a/portable/MPLAB/PIC32MX/portmacro.h +++ b/portable/MPLAB/PIC32MX/portmacro.h @@ -59,16 +59,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/MPLAB/PIC32MZ/portmacro.h b/portable/MPLAB/PIC32MZ/portmacro.h index 43d65985504..17b266e1124 100644 --- a/portable/MPLAB/PIC32MZ/portmacro.h +++ b/portable/MPLAB/PIC32MZ/portmacro.h @@ -59,16 +59,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/MSVC-MingW/portmacro.h b/portable/MSVC-MingW/portmacro.h index 3cbb06b5533..b1282b3d71e 100644 --- a/portable/MSVC-MingW/portmacro.h +++ b/portable/MSVC-MingW/portmacro.h @@ -50,16 +50,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32/64-bit tick type on a 32/64-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /* Hardware specifics. */ diff --git a/portable/MikroC/ARM_CM4F/portmacro.h b/portable/MikroC/ARM_CM4F/portmacro.h index 1fa3d796403..fa614c31488 100644 --- a/portable/MikroC/ARM_CM4F/portmacro.h +++ b/portable/MikroC/ARM_CM4F/portmacro.h @@ -62,16 +62,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/Paradigm/Tern_EE/large_untested/portmacro.h b/portable/Paradigm/Tern_EE/large_untested/portmacro.h index 753686c5a2c..ed513309c7c 100644 --- a/portable/Paradigm/Tern_EE/large_untested/portmacro.h +++ b/portable/Paradigm/Tern_EE/large_untested/portmacro.h @@ -57,12 +57,14 @@ typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/Paradigm/Tern_EE/small/portmacro.h b/portable/Paradigm/Tern_EE/small/portmacro.h index 5918b04be60..9eb5b863023 100644 --- a/portable/Paradigm/Tern_EE/small/portmacro.h +++ b/portable/Paradigm/Tern_EE/small/portmacro.h @@ -59,12 +59,14 @@ typedef unsigned short UBaseType_t; typedef void ( __interrupt __far *pxISR )(); -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM7_LPC21xx/portmacro.h b/portable/RVDS/ARM7_LPC21xx/portmacro.h index 821d2e2f26e..95043eb8deb 100644 --- a/portable/RVDS/ARM7_LPC21xx/portmacro.h +++ b/portable/RVDS/ARM7_LPC21xx/portmacro.h @@ -60,12 +60,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM_CA9/portmacro.h b/portable/RVDS/ARM_CA9/portmacro.h index a092952a940..65dd8174f7c 100644 --- a/portable/RVDS/ARM_CA9/portmacro.h +++ b/portable/RVDS/ARM_CA9/portmacro.h @@ -57,16 +57,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM_CM0/portmacro.h b/portable/RVDS/ARM_CM0/portmacro.h index c6cd9e859c1..54165e74cc4 100644 --- a/portable/RVDS/ARM_CM0/portmacro.h +++ b/portable/RVDS/ARM_CM0/portmacro.h @@ -59,16 +59,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. 
*/ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM_CM3/portmacro.h b/portable/RVDS/ARM_CM3/portmacro.h index 5ecfb7b0e8b..c23a6b3fdbd 100644 --- a/portable/RVDS/ARM_CM3/portmacro.h +++ b/portable/RVDS/ARM_CM3/portmacro.h @@ -59,16 +59,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM_CM4F/portmacro.h b/portable/RVDS/ARM_CM4F/portmacro.h index 1b3f93b0e97..de92c8d4bd9 100644 --- a/portable/RVDS/ARM_CM4F/portmacro.h +++ b/portable/RVDS/ARM_CM4F/portmacro.h @@ -59,16 +59,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM_CM4_MPU/portmacro.h b/portable/RVDS/ARM_CM4_MPU/portmacro.h index ac89aedac97..c7cd5628951 100644 --- a/portable/RVDS/ARM_CM4_MPU/portmacro.h +++ b/portable/RVDS/ARM_CM4_MPU/portmacro.h @@ -59,16 +59,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/RVDS/ARM_CM7/r0p1/portmacro.h b/portable/RVDS/ARM_CM7/r0p1/portmacro.h index a46ec16bce0..a8fa6630e65 100644 --- a/portable/RVDS/ARM_CM7/r0p1/portmacro.h +++ b/portable/RVDS/ARM_CM7/r0p1/portmacro.h @@ -59,16 +59,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX100/portmacro.h b/portable/Renesas/RX100/portmacro.h index cc789b2be6f..9bcbd3c8c3d 100644 --- a/portable/Renesas/RX100/portmacro.h +++ b/portable/Renesas/RX100/portmacro.h @@ -61,16 +61,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX200/portmacro.h b/portable/Renesas/RX200/portmacro.h index fc20449a7d5..62a085023e2 100644 --- a/portable/Renesas/RX200/portmacro.h +++ b/portable/Renesas/RX200/portmacro.h @@ -61,16 +61,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX600/portmacro.h b/portable/Renesas/RX600/portmacro.h index 1430b54a9f0..3b29cbddb50 100644 --- a/portable/Renesas/RX600/portmacro.h +++ b/portable/Renesas/RX600/portmacro.h @@ -61,16 +61,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX600v2/portmacro.h b/portable/Renesas/RX600v2/portmacro.h index eb12462272d..d67a8f892aa 100644 --- a/portable/Renesas/RX600v2/portmacro.h +++ b/portable/Renesas/RX600v2/portmacro.h @@ -61,16 +61,18 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/RX700v3_DPFPU/portmacro.h b/portable/Renesas/RX700v3_DPFPU/portmacro.h index da1e0b1d67e..12657ee82bf 100644 --- a/portable/Renesas/RX700v3_DPFPU/portmacro.h +++ b/portable/Renesas/RX700v3_DPFPU/portmacro.h @@ -79,16 +79,18 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/Renesas/SH2A_FPU/portmacro.h b/portable/Renesas/SH2A_FPU/portmacro.h index 8fb7e66a536..422cd59ed48 100644 --- a/portable/Renesas/SH2A_FPU/portmacro.h +++ b/portable/Renesas/SH2A_FPU/portmacro.h @@ -60,10 +60,10 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL diff --git a/portable/Rowley/MSP430F449/portmacro.h b/portable/Rowley/MSP430F449/portmacro.h index c56e436d984..7137a6e0964 100644 --- a/portable/Rowley/MSP430F449/portmacro.h +++ b/portable/Rowley/MSP430F449/portmacro.h @@ -53,12 +53,14 @@ typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/SDCC/Cygnal/portmacro.h b/portable/SDCC/Cygnal/portmacro.h index f4fa20e8478..04186381f62 100644 --- a/portable/SDCC/Cygnal/portmacro.h +++ b/portable/SDCC/Cygnal/portmacro.h @@ -61,12 +61,14 @@ typedef portSTACK_TYPE StackType_t; typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/Softune/MB91460/portmacro.h b/portable/Softune/MB91460/portmacro.h index 2a3c6f189fb..9ae6959c46b 100644 --- a/portable/Softune/MB91460/portmacro.h +++ b/portable/Softune/MB91460/portmacro.h @@ -59,12 +59,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/Softune/MB96340/portmacro.h b/portable/Softune/MB96340/portmacro.h index c953083f13e..827874fde9d 100644 --- a/portable/Softune/MB96340/portmacro.h +++ b/portable/Softune/MB96340/portmacro.h @@ -65,12 +65,14 @@ typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/Tasking/ARM_CM4F/portmacro.h b/portable/Tasking/ARM_CM4F/portmacro.h index a59418cb1ab..3371f34f47a 100644 --- a/portable/Tasking/ARM_CM4F/portmacro.h +++ b/portable/Tasking/ARM_CM4F/portmacro.h @@ -58,16 +58,18 @@ typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h b/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h index ac00ff54ab0..b82e48695d9 100644 --- a/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h +++ b/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h @@ -60,12 +60,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; typedef void (*portvectorfunc)(void); -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif diff --git a/portable/ThirdParty/GCC/ARC_EM_HS/portmacro.h b/portable/ThirdParty/GCC/ARC_EM_HS/portmacro.h index d41ad8534cf..49b15b09959 100644 --- a/portable/ThirdParty/GCC/ARC_EM_HS/portmacro.h +++ b/portable/ThirdParty/GCC/ARC_EM_HS/portmacro.h @@ -82,12 +82,14 @@ typedef portSTACK_TYPE StackType_t; typedef long BaseType_t; typedef unsigned long UBaseType_t; -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef unsigned int TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif #define portNO_CRITICAL_NESTING ( ( uint32_t ) 0 ) diff --git a/portable/ThirdParty/GCC/ARC_v1/portmacro.h b/portable/ThirdParty/GCC/ARC_v1/portmacro.h index d8850e75897..72fbb49759e 100644 --- a/portable/ThirdParty/GCC/ARC_v1/portmacro.h +++ b/portable/ThirdParty/GCC/ARC_v1/portmacro.h @@ -81,12 +81,14 @@ typedef long BaseType_t; typedef unsigned long UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef unsigned int TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif #define portNO_CRITICAL_NESTING ( ( uint32_t ) 0 ) diff --git a/portable/ThirdParty/GCC/ATmega/portmacro.h b/portable/ThirdParty/GCC/ATmega/portmacro.h index 40e59ff2ba6..3e7714eb836 100644 --- a/portable/ThirdParty/GCC/ATmega/portmacro.h +++ b/portable/ThirdParty/GCC/ATmega/portmacro.h @@ -56,12 +56,14 @@ typedef uint8_t StackType_t; typedef int8_t BaseType_t; typedef uint8_t UBaseType_t; -#if configUSE_16_BIT_TICKS == 1 +#if configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h index 105600d1305..f19e96a0141 100644 --- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h +++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h @@ -58,16 +58,18 @@ typedef int32_t BaseType_t; typedef uint32_t UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. 
*/ #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/GCC/RP2040/port.c b/portable/ThirdParty/GCC/RP2040/port.c index 550c1794d50..41dd114a001 100644 --- a/portable/ThirdParty/GCC/RP2040/port.c +++ b/portable/ThirdParty/GCC/RP2040/port.c @@ -706,24 +706,24 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) static inline EventBits_t prvGetEventGroupBit( spin_lock_t * spinLock ) { uint32_t ulBit; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) ulBit = 1u << (spin_lock_get_num(spinLock) & 0x7u); - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) ulBit = 1u << spin_lock_get_num(spinLock); /* reduce to range 0-24 */ ulBit |= ulBit << 8u; ulBit >>= 8u; - #endif /* configUSE_16_BIT_TICKS */ + #endif /* configTICK_TYPE_WIDTH_IN_BITS */ return ( EventBits_t ) ulBit; } static inline EventBits_t prvGetAllEventGroupBits() { - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) return (EventBits_t) 0xffu; - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) return ( EventBits_t ) 0xffffffu; - #endif /* configUSE_16_BIT_TICKS */ + #endif /* configTICK_TYPE_WIDTH_IN_BITS */ } void vPortLockInternalSpinUnlockWithWait( struct lock_core * pxLock, uint32_t ulSave ) diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h index d66ca3da5a0..e87d560cfcd 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h @@ -119,12 +119,14 @@ typedef portBASE_TYPE BaseType_t; typedef unsigned portBASE_TYPE UBaseType_t; - #if ( configUSE_16_BIT_TICKS == 1 ) + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff - #else + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/XCC/Xtensa/portmacro.h b/portable/ThirdParty/XCC/Xtensa/portmacro.h index a0b87a007a6..c81576c9f78 100644 --- a/portable/ThirdParty/XCC/Xtensa/portmacro.h +++ b/portable/ThirdParty/XCC/Xtensa/portmacro.h @@ -70,12 +70,14 @@ typedef portSTACK_TYPE StackType_t; typedef portBASE_TYPE BaseType_t; typedef unsigned portBASE_TYPE UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif /*-----------------------------------------------------------*/ diff --git a/portable/WizC/PIC18/portmacro.h b/portable/WizC/PIC18/portmacro.h index fea77758b77..ad70560e4ad 100644 --- a/portable/WizC/PIC18/portmacro.h +++ b/portable/WizC/PIC18/portmacro.h @@ -58,12 +58,14 @@ typedef signed char BaseType_t; typedef unsigned char UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) ( 0xFFFF ) -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif #define portBYTE_ALIGNMENT 1 diff --git a/portable/oWatcom/16BitDOS/Flsh186/portmacro.h b/portable/oWatcom/16BitDOS/Flsh186/portmacro.h index 29e8270b571..a2b4c16a40b 100644 --- a/portable/oWatcom/16BitDOS/Flsh186/portmacro.h +++ b/portable/oWatcom/16BitDOS/Flsh186/portmacro.h @@ -58,12 +58,14 @@ typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/portable/oWatcom/16BitDOS/PC/portmacro.h b/portable/oWatcom/16BitDOS/PC/portmacro.h index 6c8e022b410..ab4eea87210 100644 --- a/portable/oWatcom/16BitDOS/PC/portmacro.h +++ b/portable/oWatcom/16BitDOS/PC/portmacro.h @@ -57,12 +57,14 @@ typedef short BaseType_t; typedef unsigned short UBaseType_t; -#if( configUSE_16_BIT_TICKS == 1 ) +#if( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) typedef uint16_t TickType_t; #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL #else - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif /*-----------------------------------------------------------*/ diff --git a/tasks.c b/tasks.c index ac221afe739..845af7daa7b 100644 --- a/tasks.c +++ b/tasks.c @@ -241,10 +241,12 @@ * the scheduler that the value should not be changed - in which case it is the * responsibility of whichever module is using the value to ensure it gets set back * to its original value when it is released. */ -#if ( configUSE_16_BIT_TICKS == 1 ) +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U -#else +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS ) + #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000000000000000ULL #endif /* From 050cf0d80f4a25c06a37efa7ab6f1b4c877a84b0 Mon Sep 17 00:00:00 2001 From: bbain <16752579+bbain@users.noreply.github.com> Date: Mon, 13 Feb 2023 15:58:20 +1100 Subject: [PATCH 08/62] Introduce portMEMORY_BARRIER for Microblaze port. 
(#621) The introduction of `portMEMORY_BARRIER` will ensure the places in the kernel use a barrier will work. For example, `xTaskResumeAll` has a memory barrier to ensure its correctness when compiled with optimization enabled. Without the barrier `xTaskResumeAll` can fail (e.g. start reading and writing to address 0 and/or infinite looping) when `xPendingReadyList` contains more than one task to restore. In `xTaskResumeAll` the compiler chooses to cache the `pxTCB` the first time through the loop for use in every subsequent loop. This is incorrect as the removal of `pxTCB->xEventListItem` will actually change the value of `pxTCB` if it was read again at the top of the loop. The barrier forces the compiler to read `pxTCB` again at the top of the loop. The compiler is operating correctly. The removal `pxTCB->xEventListItem` executes on a `List_t *` and `ListItem_t *`. This means that the compiler can assume that any `MiniListItem_t` values are unchanged by the loop (i.e. "strict-aliasing"). This allows the compiler to cache `pxTCB` as it is obtained via a `MiniListItem_t`. This is incorrect in this case because it is possible for a `ListItem_t *` to actually alias a `MiniListItem_t`. This is technically a "violation of aliasing rules" so we use the the barrier to disable the strict-aliasing optimization in this loop. --- portable/GCC/MicroBlaze/portmacro.h | 1 + portable/GCC/MicroBlazeV8/portmacro.h | 1 + portable/GCC/MicroBlazeV9/portmacro.h | 1 + 3 files changed, 3 insertions(+) diff --git a/portable/GCC/MicroBlaze/portmacro.h b/portable/GCC/MicroBlaze/portmacro.h index 7d96d5c9004..c646496ab3f 100644 --- a/portable/GCC/MicroBlaze/portmacro.h +++ b/portable/GCC/MicroBlaze/portmacro.h @@ -115,6 +115,7 @@ void vTaskSwitchContext(); #define portSTACK_GROWTH ( -1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portNOP() asm volatile ( "NOP" ) +#define portMEMORY_BARRIER() asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* Task function macros as described on the FreeRTOS.org WEB site. */ diff --git a/portable/GCC/MicroBlazeV8/portmacro.h b/portable/GCC/MicroBlazeV8/portmacro.h index a4237336f2e..be9dfacc42f 100644 --- a/portable/GCC/MicroBlazeV8/portmacro.h +++ b/portable/GCC/MicroBlazeV8/portmacro.h @@ -152,6 +152,7 @@ extern volatile uint32_t ulTaskSwitchRequested; #define portSTACK_GROWTH ( -1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portNOP() asm volatile ( "NOP" ) +#define portMEMORY_BARRIER() asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* Task function macros as described on the FreeRTOS.org WEB site. 
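The `portMEMORY_BARRIER()` added to the three MicroBlaze ports is a pure compiler barrier: the empty `asm volatile` with a `"memory"` clobber emits no instructions, but it stops GCC from keeping memory-derived values cached in registers across it. A minimal stand-alone illustration of that effect (not kernel code; `MEMORY_BARRIER`, `vWaitUntilCleared` and `pulFlag` are names invented for the sketch):

```
#include <stdint.h>

/* Same construct as the portMEMORY_BARRIER() definition added above. */
#define MEMORY_BARRIER()    __asm__ volatile ( "" ::: "memory" )

/* Without the barrier the compiler may hoist the read of *pulFlag out of the
 * loop and spin on a stale register copy; the barrier forces *pulFlag to be
 * re-read on every iteration.  This is the same effect the kernel relies on
 * in xTaskResumeAll() to make pxTCB be re-read at the top of its loop. */
void vWaitUntilCleared( uint32_t * pulFlag )
{
    while( *pulFlag != 0UL )
    {
        MEMORY_BARRIER();
    }
}
```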
*/ diff --git a/portable/GCC/MicroBlazeV9/portmacro.h b/portable/GCC/MicroBlazeV9/portmacro.h index 84b358f5b5a..c26daa79fc2 100644 --- a/portable/GCC/MicroBlazeV9/portmacro.h +++ b/portable/GCC/MicroBlazeV9/portmacro.h @@ -152,6 +152,7 @@ extern volatile uint32_t ulTaskSwitchRequested; #define portSTACK_GROWTH ( -1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portNOP() asm volatile ( "NOP" ) +#define portMEMORY_BARRIER() asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ #if( XPAR_MICROBLAZE_USE_STACK_PROTECTION ) From 8252edec4547afed4e6ada74bd9353b52972894f Mon Sep 17 00:00:00 2001 From: Ju1He1 <93189163+Ju1He1@users.noreply.github.com> Date: Wed, 15 Feb 2023 07:10:32 +0100 Subject: [PATCH 09/62] Do not call exit() on MSVC Port when calling vPortEndScheduler (#624) * make port exitable * correctly set xPortRunning to False * add suggestions from Review Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> * add suggestions from Review Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --- portable/MSVC-MingW/port.c | 44 +++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/portable/MSVC-MingW/port.c b/portable/MSVC-MingW/port.c index 67b79baa917..f39f0ecbb88 100644 --- a/portable/MSVC-MingW/port.c +++ b/portable/MSVC-MingW/port.c @@ -160,7 +160,7 @@ TIMECAPS xTimeCaps; /* Just to prevent compiler warnings. */ ( void ) lpParameter; - for( ;; ) + while( xPortRunning == pdTRUE ) { /* Wait until the timer expires and we can access the simulated interrupt variables. *NOTE* this is not a 'real time' way of generating tick @@ -177,32 +177,32 @@ TIMECAPS xTimeCaps; Sleep( portTICK_PERIOD_MS ); } - configASSERT( xPortRunning ); + if( xPortRunning == pdTRUE ) + { + configASSERT( xPortRunning ); - /* Can't proceed if in a critical section as pvInterruptEventMutex won't - be available. */ - WaitForSingleObject( pvInterruptEventMutex, INFINITE ); + /* Can't proceed if in a critical section as pvInterruptEventMutex won't + be available. */ + WaitForSingleObject( pvInterruptEventMutex, INFINITE ); - /* The timer has expired, generate the simulated tick event. */ - ulPendingInterrupts |= ( 1 << portINTERRUPT_TICK ); + /* The timer has expired, generate the simulated tick event. */ + ulPendingInterrupts |= ( 1 << portINTERRUPT_TICK ); - /* The interrupt is now pending - notify the simulated interrupt - handler thread. Must be outside of a critical section to get here so - the handler thread can execute immediately pvInterruptEventMutex is - released. */ - configASSERT( ulCriticalNesting == 0UL ); - SetEvent( pvInterruptEvent ); + /* The interrupt is now pending - notify the simulated interrupt + handler thread. Must be outside of a critical section to get here so + the handler thread can execute immediately pvInterruptEventMutex is + released. */ + configASSERT( ulCriticalNesting == 0UL ); + SetEvent( pvInterruptEvent ); - /* Give back the mutex so the simulated interrupt handler unblocks - and can access the interrupt handler variables. */ - ReleaseMutex( pvInterruptEventMutex ); + /* Give back the mutex so the simulated interrupt handler unblocks + and can access the interrupt handler variables. 
*/ + ReleaseMutex( pvInterruptEventMutex ); + } } - #ifdef __GNUC__ - /* Should never reach here - MingW complains if you leave this line out, - MSVC complains if you put it in. */ - return 0; - #endif + + return 0; } /*-----------------------------------------------------------*/ @@ -566,7 +566,7 @@ uint32_t ulErrorCode; void vPortEndScheduler( void ) { - exit( 0 ); + xPortRunning = pdFALSE; } /*-----------------------------------------------------------*/ From ba1deb54e8fb76b8325c8ca9fe44abd5daed8ddf Mon Sep 17 00:00:00 2001 From: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com> Date: Mon, 20 Feb 2023 13:16:57 -0800 Subject: [PATCH 10/62] Update PR template to include checkbox for Unit Test related changes (#627) --- .github/pull_request_template.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index c3c8607fbde..a39aa2bfa33 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -8,6 +8,13 @@ Test Steps ----------- +Checklist: +---------- + + +- [ ] I have tested my changes. No regression in existing tests. +- [ ] I have modified and/or added unit-tests to cover the code changes in this Pull Request. + Related Issue ----------- From 5d056010455613d7cfda28f27da26573000773fd Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Thu, 23 Feb 2023 09:37:42 +0530 Subject: [PATCH 11/62] Fix build failure introduced in PR #597 (#629) The PR #597 introduced a new config option configTICK_TYPE_WIDTH_IN_BITS which can be defined to one of the following: * TICK_TYPE_WIDTH_16_BITS - Tick type is 16 bit wide. * TICK_TYPE_WIDTH_32_BITS - Tick type is 32 bit wide. * TICK_TYPE_WIDTH_64_BITS - Tick type is 64 bit wide. Earlier we supported 16 and 32 bit width for tick type which was controlled using the config option configUSE_16_BIT_TICKS. The PR tried to maintain backward compatibility by honoring configUSE_16_BIT_TICKS. The backward compatibility did not work as expected though, as the macro configTICK_TYPE_WIDTH_IN_BITS was used before it was defined. This PR addresses it by ensuring that the macro configTICK_TYPE_WIDTH_IN_BITS is defined before it is used. Testing 1. configUSE_16_BIT_TICKS is defined to 0. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 109e: 4b50 ldr r3, [pc, #320] ; (11e0 ) 10a0: f8d3 4134 ldr.w r4, [r3, #308] ; 0x134 10a4: 3401 adds r4, #1 10a6: f8c3 4134 str.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 32 bit. 2. configUSE_16_BIT_TICKS is defined to 1. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 10e2: 4b53 ldr r3, [pc, #332] ; (1230 ) 10e4: f8b3 4134 ldrh.w r4, [r3, #308] ; 0x134 10e8: b2a4 uxth r4, r4 10ea: 3401 adds r4, #1 10ec: b2a4 uxth r4, r4 10ee: f8a3 4134 strh.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 16 bit. 3. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_16_BITS. 
Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 10e2: 4b53 ldr r3, [pc, #332] ; (1230 ) 10e4: f8b3 4134 ldrh.w r4, [r3, #308] ; 0x134 10e8: b2a4 uxth r4, r4 10ea: 3401 adds r4, #1 10ec: b2a4 uxth r4, r4 10ee: f8a3 4134 strh.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 16 bit. 4. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_32_BITS. Source (function xTaskIncrementTick in tasks.c): ``` const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; ``` Assembly: ``` 109e: 4b50 ldr r3, [pc, #320] ; (11e0 ) 10a0: f8d3 4134 ldr.w r4, [r3, #308] ; 0x134 10a4: 3401 adds r4, #1 10a6: f8c3 4134 str.w r4, [r3, #308] ; 0x134 ``` It is clear from assembly that the tick type is 32 bit. 5. configTICK_TYPE_WIDTH_IN_BITS is defined to TICK_TYPE_WIDTH_64_BITS. ``` #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. ``` The testing was done for GCC/ARM_CM3 port which does not support 64 bit tick type. 6. Neither configUSE_16_BIT_TICKS nor configTICK_TYPE_WIDTH_IN_BITS defined. ``` #error Missing definition: One of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. ``` 7. Both configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS defined. ``` #error Only one of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. ``` Related issue - https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/628 Signed-off-by: Gaurav Aggarwal --- include/FreeRTOS.h | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 790a06db058..b7a654961e2 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -63,6 +63,24 @@ /* Application specific configuration options. */ #include "FreeRTOSConfig.h" +#if !defined( configUSE_16_BIT_TICKS ) && !defined( configTICK_TYPE_WIDTH_IN_BITS ) + #error Missing definition: One of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. +#endif + +#if defined( configUSE_16_BIT_TICKS ) && defined( configTICK_TYPE_WIDTH_IN_BITS ) + #error Only one of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. +#endif + +/* Define configTICK_TYPE_WIDTH_IN_BITS according to the + * value of configUSE_16_BIT_TICKS for backward compatibility. */ +#ifndef configTICK_TYPE_WIDTH_IN_BITS + #if ( configUSE_16_BIT_TICKS == 1 ) + #define configTICK_TYPE_WIDTH_IN_BITS TICK_TYPE_WIDTH_16_BITS + #else + #define configTICK_TYPE_WIDTH_IN_BITS TICK_TYPE_WIDTH_32_BITS + #endif +#endif + /* Basic FreeRTOS definitions. */ #include "projdefs.h" @@ -160,24 +178,6 @@ #error Missing definition: configUSE_TICK_HOOK must be defined in FreeRTOSConfig.h as either 1 or 0. See the Configuration section of the FreeRTOS API documentation for details. #endif -#if !defined( configUSE_16_BIT_TICKS ) && !defined( configTICK_TYPE_WIDTH_IN_BITS ) - #error Missing definition: One of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. 
See the Configuration section of the FreeRTOS API documentation for details. -#endif - -#if defined( configUSE_16_BIT_TICKS ) && defined( configTICK_TYPE_WIDTH_IN_BITS ) - #error Only one of configUSE_16_BIT_TICKS and configTICK_TYPE_WIDTH_IN_BITS must be defined in FreeRTOSConfig.h. See the Configuration section of the FreeRTOS API documentation for details. -#endif - -/* Define configTICK_TYPE_WIDTH_IN_BITS according to the - * value of configUSE_16_BIT_TICKS for backward compatibility. */ -#ifndef configTICK_TYPE_WIDTH_IN_BITS - #if ( configUSE_16_BIT_TICKS == 1 ) - #define configTICK_TYPE_WIDTH_IN_BITS TICK_TYPE_WIDTH_16_BITS - #else - #define configTICK_TYPE_WIDTH_IN_BITS TICK_TYPE_WIDTH_32_BITS - #endif -#endif - #if ( ( configTICK_TYPE_WIDTH_IN_BITS != TICK_TYPE_WIDTH_16_BITS ) && \ ( configTICK_TYPE_WIDTH_IN_BITS != TICK_TYPE_WIDTH_32_BITS ) && \ ( configTICK_TYPE_WIDTH_IN_BITS != TICK_TYPE_WIDTH_64_BITS ) ) From 8cd5451ad50819b3a969b250ac1fd3528ef56ece Mon Sep 17 00:00:00 2001 From: phelter Date: Thu, 23 Feb 2023 10:05:04 -0800 Subject: [PATCH 12/62] Feature/fixing clang gnu compiler warnings (#620) * Adding in ability to support a library for freertos_config and a custom freertos_kernel_port (#558) * Using single name definition for libraries everywhere. (#558) * Supporting backwards compatibility with FREERTOS_CONFIG_FILE_DIRECTORY (#571) * Removing compiler warnings for GNU and Clang. (#571) * Added in documentation on how to consume from a main project. Added default PORT selection for native POSIX and MINGW platforms. * Only adding freertos_config if it exists. Removing auto generation of it from a FREERTOS_CONFIG_FILE_DIRECTORY. * Fixing clang and gnu compiler warnings. * Adding in project information and how to compile for GNU/clang * Fixing compiler issue with unused variable - no need to declare variable. * Adding in compile warnings for linux builds that kernel is okay with using. * Fixing more extra-semi-stmt clang warnings. * Moving definition of hooks into header files if features are enabled. * Fixing formatting with uncrustify. * Fixing merge conflicts with main merge. * Fixing compiler errors due to merge issues and formatting. * Fixing Line feeds. * Adding 'portNORETURN' into portmacros.h. Other Updates based on PR request * Further clean-up of clang and clang-tidy issues. * Removing compiler specific pragmas from common c files. * Fixing missing lexicon entry and uncrustify formatting changes. * Resolving merge issue multiple defnitions of proto for prvIdleTask * Fixing formatting issues that are not covered by uncrustify. Use clang-tidy instead if you want this level of control. * More uncrustify formatting issues. * Fixing extra bracket in #if statement. 
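Returning to the `FreeRTOS.h` compatibility block moved above: from an application's point of view, the rule after that change is simply to define exactly one of the two tick-width options in `FreeRTOSConfig.h`; defining both, or neither, now fails the build via the `#error` checks shown earlier. A sketch of the two equivalent configurations (the values are chosen only as an example):

```
/* FreeRTOSConfig.h - define exactly one of the following. */

/* Newer style: TICK_TYPE_WIDTH_16_BITS, TICK_TYPE_WIDTH_32_BITS or, on ports
 * that support it, TICK_TYPE_WIDTH_64_BITS. */
#define configTICK_TYPE_WIDTH_IN_BITS    TICK_TYPE_WIDTH_32_BITS

/* Legacy style, still honoured for backward compatibility
 * (1 = 16-bit tick, 0 = 32-bit tick): */
/* #define configUSE_16_BIT_TICKS        0 */
```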
--------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --- .github/lexicon.txt | 1 + CMakeLists.txt | 41 ++++++ event_groups.c | 10 +- include/FreeRTOS.h | 4 + include/list.h | 36 ++--- include/queue.h | 6 +- include/stack_macros.h | 4 +- include/task.h | 4 +- include/timers.h | 14 ++ .../portable/GCC/ARM_CM23/portmacro.h | 1 + .../portable/GCC/ARM_CM23_NTZ/portmacro.h | 1 + .../portable/GCC/ARM_CM33/portmacro.h | 1 + .../portable/GCC/ARM_CM33_NTZ/portmacro.h | 1 + .../portable/GCC/ARM_CM55/portmacro.h | 1 + .../portable/GCC/ARM_CM85/portmacro.h | 1 + portable/GCC/ARM_CM0/portmacro.h | 1 + portable/GCC/ARM_CM23/non_secure/portmacro.h | 1 + .../GCC/ARM_CM23_NTZ/non_secure/portmacro.h | 1 + portable/GCC/ARM_CM3/portmacro.h | 1 + portable/GCC/ARM_CM33/non_secure/portmacro.h | 1 + .../GCC/ARM_CM33_NTZ/non_secure/portmacro.h | 1 + portable/GCC/ARM_CM3_MPU/portmacro.h | 1 + portable/GCC/ARM_CM4F/portmacro.h | 1 + portable/GCC/ARM_CM4_MPU/portmacro.h | 1 + portable/GCC/ARM_CM55/non_secure/portmacro.h | 1 + .../GCC/ARM_CM55_NTZ/non_secure/portmacro.h | 1 + portable/GCC/ARM_CM7/r0p1/portmacro.h | 1 + portable/GCC/ARM_CM85/non_secure/portmacro.h | 1 + .../GCC/ARM_CM85_NTZ/non_secure/portmacro.h | 1 + portable/MemMang/heap_4.c | 8 +- portable/ThirdParty/GCC/Posix/port.c | 7 +- portable/ThirdParty/GCC/Posix/portmacro.h | 2 + .../ThirdParty/GCC/RP2040/include/portmacro.h | 1 + .../GCC/Xtensa_ESP32/include/portmacro.h | 2 + queue.c | 64 ++++----- stream_buffer.c | 56 ++++---- tasks.c | 128 +++++++++--------- timers.c | 24 ++-- 38 files changed, 260 insertions(+), 172 deletions(-) diff --git a/.github/lexicon.txt b/.github/lexicon.txt index 765b05c1eb5..47ce4ccde11 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -2458,6 +2458,7 @@ vaninterruptserviceroutine vanisr vanothertask vapplicationcleartimerinterrupt +vapplicationdaemontaskstartuphook vapplicationexceptionregisterdump vapplicationfpusafeirqhandler vapplicationgetidletaskmemory diff --git a/CMakeLists.txt b/CMakeLists.txt index 09bd4bdb84d..3224a97653b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -222,6 +222,47 @@ elseif((FREERTOS_PORT STREQUAL "A_CUSTOM_PORT") AND (NOT TARGET freertos_kernel_ " freertos_kernel)") endif() +######################################################################## +# Requirements +set(CMAKE_C_STANDARD 99) +set(CMAKE_C_STANDARD_REQUIRED ON) + +######################################################################## +# Overall Compile Options +# Note the compile option strategy is to error on everything and then +# Per library opt-out of things that are warnings/errors. +# This ensures that no matter what strategy for compilation you take, the +# builds will still occur. +# +# Only tested with GNU and Clang. +# Other options are https://cmake.org/cmake/help/latest/variable/CMAKE_LANG_COMPILER_ID.html#variable:CMAKE_%3CLANG%3E_COMPILER_ID +# Naming of compilers translation map: +# +# FreeRTOS | CMake +# ------------------- +# CCS | ?TBD? +# GCC | GNU, Clang, *Clang Others? +# IAR | IAR +# Keil | ARMCC +# MSVC | MSVC # Note only for MinGW? +# Renesas | ?TBD? + +add_compile_options( + ### Gnu/Clang C Options + $<$:-fdiagnostics-color=always> + $<$:-fcolor-diagnostics> + + $<$:-Wall> + $<$:-Wextra> + $<$:-Wpedantic> + $<$:-Werror> + $<$:-Weverything> + + # TODO: Add in other Compilers here. 
+) + + +######################################################################## add_subdirectory(portable) add_library(freertos_kernel STATIC diff --git a/event_groups.c b/event_groups.c index a64c5ead34c..7c86e605a88 100644 --- a/event_groups.c +++ b/event_groups.c @@ -69,14 +69,14 @@ typedef struct EventGroupDef_t { EventBits_t uxEventBits; - List_t xTasksWaitingForBits; /*< List of tasks waiting for a bit to be set. */ + List_t xTasksWaitingForBits; /**< List of tasks waiting for a bit to be set. */ #if ( configUSE_TRACE_FACILITY == 1 ) UBaseType_t uxEventGroupNumber; #endif #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) - uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */ + uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */ #endif } EventGroup_t; @@ -521,15 +521,15 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) { - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; EventGroup_t const * const pxEventBits = xEventGroup; EventBits_t uxReturn; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { uxReturn = pxEventBits->uxEventBits; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return uxReturn; } /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */ diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index b7a654961e2..e720480dc64 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -904,6 +904,10 @@ #define portDONT_DISCARD #endif +#ifndef portNORETURN + #define portNORETURN +#endif + #ifndef configUSE_TIME_SLICING #define configUSE_TIME_SLICING 1 #endif diff --git a/include/list.h b/include/list.h index 248212260a1..c86eb7165f8 100644 --- a/include/list.h +++ b/include/list.h @@ -143,20 +143,20 @@ struct xLIST; struct xLIST_ITEM { - listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ - configLIST_VOLATILE TickType_t xItemValue; /*< The value being listed. In most cases this is used to sort the list in ascending order. */ - struct xLIST_ITEM * configLIST_VOLATILE pxNext; /*< Pointer to the next ListItem_t in the list. */ - struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; /*< Pointer to the previous ListItem_t in the list. */ - void * pvOwner; /*< Pointer to the object (normally a TCB) that contains the list item. There is therefore a two way link between the object containing the list item and the list item itself. */ - struct xLIST * configLIST_VOLATILE pxContainer; /*< Pointer to the list in which this list item is placed (if any). */ - listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + configLIST_VOLATILE TickType_t xItemValue; /**< The value being listed. In most cases this is used to sort the list in ascending order. */ + struct xLIST_ITEM * configLIST_VOLATILE pxNext; /**< Pointer to the next ListItem_t in the list. 
*/ + struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; /**< Pointer to the previous ListItem_t in the list. */ + void * pvOwner; /**< Pointer to the object (normally a TCB) that contains the list item. There is therefore a two way link between the object containing the list item and the list item itself. */ + struct xLIST * configLIST_VOLATILE pxContainer; /**< Pointer to the list in which this list item is placed (if any). */ + listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ }; typedef struct xLIST_ITEM ListItem_t; /* For some reason lint wants this as two separate definitions. */ #if ( configUSE_MINI_LIST_ITEM == 1 ) struct xMINI_LIST_ITEM { - listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ configLIST_VOLATILE TickType_t xItemValue; struct xLIST_ITEM * configLIST_VOLATILE pxNext; struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; @@ -171,11 +171,11 @@ typedef struct xLIST_ITEM ListItem_t; /* For some reason lint */ typedef struct xLIST { - listFIRST_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listFIRST_LIST_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ volatile UBaseType_t uxNumberOfItems; - ListItem_t * configLIST_VOLATILE pxIndex; /*< Used to walk through the list. Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */ - MiniListItem_t xListEnd; /*< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */ - listSECOND_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + ListItem_t * configLIST_VOLATILE pxIndex; /**< Used to walk through the list. Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */ + MiniListItem_t xListEnd; /**< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */ + listSECOND_LIST_INTEGRITY_CHECK_VALUE /**< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ } List_t; /* @@ -283,7 +283,7 @@ typedef struct xLIST * \ingroup LinkedList */ #define listGET_OWNER_OF_NEXT_ENTRY( pxTCB, pxList ) \ - { \ + do { \ List_t * const pxConstList = ( pxList ); \ /* Increment the index to the next item and return the item, ensuring */ \ /* we don't return the marker used at the end of the list. */ \ @@ -293,7 +293,7 @@ typedef struct xLIST ( pxConstList )->pxIndex = ( pxConstList )->xListEnd.pxNext; \ } \ ( pxTCB ) = ( pxConstList )->pxIndex->pvOwner; \ - } + } while( 0 ) /* * Version of uxListRemove() that does not return a value. Provided as a slight @@ -312,7 +312,7 @@ typedef struct xLIST * \ingroup LinkedList */ #define listREMOVE_ITEM( pxItemToRemove ) \ - { \ + do { \ /* The list item knows which list it is in. Obtain the list from the list \ * item. 
*/ \ List_t * const pxList = ( pxItemToRemove )->pxContainer; \ @@ -327,7 +327,7 @@ typedef struct xLIST \ ( pxItemToRemove )->pxContainer = NULL; \ ( pxList->uxNumberOfItems )--; \ - } + } while( 0 ) /* * Inline version of vListInsertEnd() to provide slight optimisation for @@ -352,7 +352,7 @@ typedef struct xLIST * \ingroup LinkedList */ #define listINSERT_END( pxList, pxNewListItem ) \ - { \ + do { \ ListItem_t * const pxIndex = ( pxList )->pxIndex; \ \ /* Only effective when configASSERT() is also defined, these tests may catch \ @@ -374,7 +374,7 @@ typedef struct xLIST ( pxNewListItem )->pxContainer = ( pxList ); \ \ ( ( pxList )->uxNumberOfItems )++; \ - } + } while( 0 ) /* * Access function to obtain the owner of the first entry in a list. Lists diff --git a/include/queue.h b/include/queue.h index 02356766abb..f488a2e7641 100644 --- a/include/queue.h +++ b/include/queue.h @@ -1346,9 +1346,9 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * @param pvBuffer Pointer to the buffer into which the received item will * be copied. * - * @param pxTaskWoken A task may be blocked waiting for space to become - * available on the queue. If xQueueReceiveFromISR causes such a task to - * unblock *pxTaskWoken will get set to pdTRUE, otherwise *pxTaskWoken will + * @param pxHigherPriorityTaskWoken A task may be blocked waiting for space to + * become available on the queue. If xQueueReceiveFromISR causes such a task + * to unblock *pxTaskWoken will get set to pdTRUE, otherwise *pxTaskWoken will * remain unchanged. * * @return pdTRUE if an item was successfully received from the queue, diff --git a/include/stack_macros.h b/include/stack_macros.h index 9b36959baea..7ffc7b34338 100644 --- a/include/stack_macros.h +++ b/include/stack_macros.h @@ -87,7 +87,7 @@ #if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) ) #define taskCHECK_FOR_STACK_OVERFLOW() \ - { \ + do { \ const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \ const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \ \ @@ -98,7 +98,7 @@ { \ vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ } \ - } + } while( 0 ) #endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ /*-----------------------------------------------------------*/ diff --git a/include/task.h b/include/task.h index f4e72c2c2f5..08c14dd6ff1 100644 --- a/include/task.h +++ b/include/task.h @@ -658,7 +658,7 @@ typedef enum * * @param xTask The handle of the task being updated. * - * @param xRegions A pointer to a MemoryRegion_t structure that contains the + * @param[in] pxRegions A pointer to a MemoryRegion_t structure that contains the * new memory region definitions. * * Example usage: @@ -1667,7 +1667,7 @@ configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVIL #endif -#if ( configUSE_TICK_HOOK > 0 ) +#if ( configUSE_TICK_HOOK != 0 ) /** * task.h diff --git a/include/timers.h b/include/timers.h index d255c3986e7..6a064d62a41 100644 --- a/include/timers.h +++ b/include/timers.h @@ -1361,6 +1361,20 @@ BaseType_t xTimerGenericCommand( TimerHandle_t xTimer, #endif +#if ( configUSE_DAEMON_TASK_STARTUP_HOOK != 0 ) + +/** + * timers.h + * @code{c} + * void vApplicationDaemonTaskStartupHook( void ); + * @endcode + * + * This hook function is called form the timer task once when the task starts running. 
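The `do { ... } while( 0 )` wrappers added to the list and stack-overflow macros above are the standard idiom for multi-statement macros: the whole expansion becomes one statement, so it behaves correctly after an unbraced `if` and keeps `if`/`else` pairing intact. A small self-contained illustration (the `COUNT_TWICE_*` macros are invented for the example):

```
#include <stdio.h>

/* Unwrapped: only the first statement is guarded by an unbraced if. */
#define COUNT_TWICE_BAD( x )     ( x )++; ( x )++;

/* Wrapped: the expansion is a single statement, so the if guards all of it. */
#define COUNT_TWICE_GOOD( x )    do { ( x )++; ( x )++; } while( 0 )

int main( void )
{
    int a = 0, b = 0;

    if( 0 )
        COUNT_TWICE_BAD( a );   /* Second increment runs anyway - a ends up 1. */

    if( 0 )
        COUNT_TWICE_GOOD( b );  /* Nothing runs - b stays 0. */

    printf( "a = %d, b = %d\n", a, b );   /* Prints "a = 1, b = 0". */
    return 0;
}
```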
+ */ + void vApplicationDaemonTaskStartupHook( void ); + +#endif + /* *INDENT-OFF* */ #ifdef __cplusplus } diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h index f98b8f277cb..2891a6c7e08 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h index f98b8f277cb..2891a6c7e08 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h index 943c665cc43..53c1020008d 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h index 943c665cc43..53c1020008d 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h index b654748e138..35c160f3407 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M55" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h index 830fa2c1379..69d32d7f57a 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M85" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM0/portmacro.h b/portable/GCC/ARM_CM0/portmacro.h index 6d60fce4c90..408162d6402 100644 --- 
a/portable/GCC/ARM_CM0/portmacro.h +++ b/portable/GCC/ARM_CM0/portmacro.h @@ -77,6 +77,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacro.h b/portable/GCC/ARM_CM23/non_secure/portmacro.h index f98b8f277cb..2891a6c7e08 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h index f98b8f277cb..2891a6c7e08 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/GCC/ARM_CM3/portmacro.h b/portable/GCC/ARM_CM3/portmacro.h index c1f7b899217..dd729f13f2c 100644 --- a/portable/GCC/ARM_CM3/portmacro.h +++ b/portable/GCC/ARM_CM3/portmacro.h @@ -77,6 +77,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. */ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacro.h b/portable/GCC/ARM_CM33/non_secure/portmacro.h index 943c665cc43..53c1020008d 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h index 943c665cc43..53c1020008d 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h @@ -50,6 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM3_MPU/portmacro.h b/portable/GCC/ARM_CM3_MPU/portmacro.h index d3b3af58f4c..25058026973 100644 --- a/portable/GCC/ARM_CM3_MPU/portmacro.h +++ b/portable/GCC/ARM_CM3_MPU/portmacro.h @@ -113,6 +113,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* SVC numbers for various services. 
*/ diff --git a/portable/GCC/ARM_CM4F/portmacro.h b/portable/GCC/ARM_CM4F/portmacro.h index 1ffdc5b8b93..a3b2b46c989 100644 --- a/portable/GCC/ARM_CM4F/portmacro.h +++ b/portable/GCC/ARM_CM4F/portmacro.h @@ -80,6 +80,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. */ diff --git a/portable/GCC/ARM_CM4_MPU/portmacro.h b/portable/GCC/ARM_CM4_MPU/portmacro.h index f3365d14371..df23d95381f 100644 --- a/portable/GCC/ARM_CM4_MPU/portmacro.h +++ b/portable/GCC/ARM_CM4_MPU/portmacro.h @@ -203,6 +203,7 @@ typedef struct MPU_SETTINGS #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* SVC numbers for various services. */ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacro.h b/portable/GCC/ARM_CM55/non_secure/portmacro.h index b654748e138..35c160f3407 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M55" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h index b654748e138..35c160f3407 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M55" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM7/r0p1/portmacro.h b/portable/GCC/ARM_CM7/r0p1/portmacro.h index d65a4c24932..897fef5f468 100644 --- a/portable/GCC/ARM_CM7/r0p1/portmacro.h +++ b/portable/GCC/ARM_CM7/r0p1/portmacro.h @@ -77,6 +77,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. 
*/ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacro.h b/portable/GCC/ARM_CM85/non_secure/portmacro.h index 830fa2c1379..69d32d7f57a 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M85" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h index 830fa2c1379..69d32d7f57a 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h @@ -55,6 +55,7 @@ */ #define portARCH_NAME "Cortex-M85" #define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c index f97a1edc12c..8ca7c81e4f3 100644 --- a/portable/MemMang/heap_4.c +++ b/portable/MemMang/heap_4.c @@ -96,8 +96,8 @@ * of their memory address. */ typedef struct A_BLOCK_LINK { - struct A_BLOCK_LINK * pxNextFreeBlock; /*<< The next free block in the list. */ - size_t xBlockSize; /*<< The size of the free block. */ + struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */ + size_t xBlockSize; /**< The size of the free block. */ } BlockLink_t; /*-----------------------------------------------------------*/ @@ -390,7 +390,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ { uxAddress += ( portBYTE_ALIGNMENT - 1 ); uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); - xTotalHeapSize -= uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap; + xTotalHeapSize -= ( size_t ) ( uxAddress - ( portPOINTER_SIZE_TYPE ) ucHeap ); } pucAlignedHeap = ( uint8_t * ) uxAddress; @@ -402,7 +402,7 @@ static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */ /* pxEnd is used to mark the end of the list of free blocks and is inserted * at the end of the heap space. 
*/ - uxAddress = ( ( portPOINTER_SIZE_TYPE ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress = ( portPOINTER_SIZE_TYPE ) ( pucAlignedHeap + xTotalHeapSize ); uxAddress -= xHeapStructSize; uxAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ); pxEnd = ( BlockLink_t * ) uxAddress; diff --git a/portable/ThirdParty/GCC/Posix/port.c b/portable/ThirdParty/GCC/Posix/port.c index bd0842fbf55..82aa27ba889 100644 --- a/portable/ThirdParty/GCC/Posix/port.c +++ b/portable/ThirdParty/GCC/Posix/port.c @@ -115,6 +115,9 @@ static void prvPortYieldFromISR( void ); /*-----------------------------------------------------------*/ static void prvFatalError( const char * pcCall, + int iErrno ) __attribute__ ((__noreturn__)); + +void prvFatalError( const char * pcCall, int iErrno ) { fprintf( stderr, "%s: %s\n", pcCall, strerror( iErrno ) ); @@ -141,7 +144,7 @@ portSTACK_TYPE * pxPortInitialiseStack( portSTACK_TYPE * pxTopOfStack, */ thread = ( Thread_t * ) ( pxTopOfStack + 1 ) - 1; pxTopOfStack = ( portSTACK_TYPE * ) thread - 1; - ulStackSize = ( pxTopOfStack + 1 - pxEndOfStack ) * sizeof( *pxTopOfStack ); + ulStackSize = ( size_t )( pxTopOfStack + 1 - pxEndOfStack ) * sizeof( *pxTopOfStack ); thread->pxCode = pxCode; thread->pvParams = pvParameters; @@ -340,7 +343,7 @@ static uint64_t prvGetTimeNs( void ) clock_gettime( CLOCK_MONOTONIC, &t ); - return t.tv_sec * 1000000000ULL + t.tv_nsec; + return ( uint64_t )t.tv_sec * 1000000000ULL + ( uint64_t )t.tv_nsec; } static uint64_t prvStartTimeNs; diff --git a/portable/ThirdParty/GCC/Posix/portmacro.h b/portable/ThirdParty/GCC/Posix/portmacro.h index a5173871359..2061c323642 100644 --- a/portable/ThirdParty/GCC/Posix/portmacro.h +++ b/portable/ThirdParty/GCC/Posix/portmacro.h @@ -68,6 +68,8 @@ typedef unsigned long TickType_t; /*-----------------------------------------------------------*/ /* Architecture specifics. */ +#define portNORETURN __attribute__( ( noreturn ) ) + #define portSTACK_GROWTH ( -1 ) #define portHAS_STACK_OVERFLOW_CHECKING ( 1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h index f19e96a0141..1043d2c5587 100644 --- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h +++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h @@ -78,6 +78,7 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) + #define portNORETURN __attribute__( ( noreturn ) ) /* We have to use PICO_DIVIDER_DISABLE_INTERRUPTS as the source of truth rathern than our config, * as our FreeRTOSConfig.h header cannot be included by ASM code - which is what this affects in the SDK */ #define portUSE_DIVIDER_SAVE_RESTORE !PICO_DIVIDER_DISABLE_INTERRUPTS diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h index e87d560cfcd..575659ac597 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h @@ -335,6 +335,8 @@ /*-----------------------------------------------------------*/ /* Architecture specifics. 
*/ + #define portNORETURN __attribute__( ( noreturn ) ) + #define portSTACK_GROWTH ( -1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 4 diff --git a/queue.c b/queue.c index 21e398f15ba..ca6bfbc60dc 100644 --- a/queue.c +++ b/queue.c @@ -64,14 +64,14 @@ typedef struct QueuePointers { - int8_t * pcTail; /*< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */ - int8_t * pcReadFrom; /*< Points to the last place that a queued item was read from when the structure is used as a queue. */ + int8_t * pcTail; /**< Points to the byte at the end of the queue storage area. Once more byte is allocated than necessary to store the queue items, this is used as a marker. */ + int8_t * pcReadFrom; /**< Points to the last place that a queued item was read from when the structure is used as a queue. */ } QueuePointers_t; typedef struct SemaphoreData { - TaskHandle_t xMutexHolder; /*< The handle of the task that holds the mutex. */ - UBaseType_t uxRecursiveCallCount; /*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */ + TaskHandle_t xMutexHolder; /**< The handle of the task that holds the mutex. */ + UBaseType_t uxRecursiveCallCount; /**< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */ } SemaphoreData_t; /* Semaphores do not actually store or copy data, so have an item size of @@ -95,27 +95,27 @@ typedef struct SemaphoreData */ typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */ { - int8_t * pcHead; /*< Points to the beginning of the queue storage area. */ - int8_t * pcWriteTo; /*< Points to the free next place in the storage area. */ + int8_t * pcHead; /**< Points to the beginning of the queue storage area. */ + int8_t * pcWriteTo; /**< Points to the free next place in the storage area. */ union { - QueuePointers_t xQueue; /*< Data required exclusively when this structure is used as a queue. */ - SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */ + QueuePointers_t xQueue; /**< Data required exclusively when this structure is used as a queue. */ + SemaphoreData_t xSemaphore; /**< Data required exclusively when this structure is used as a semaphore. */ } u; - List_t xTasksWaitingToSend; /*< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */ - List_t xTasksWaitingToReceive; /*< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */ + List_t xTasksWaitingToSend; /**< List of tasks that are blocked waiting to post onto this queue. Stored in priority order. */ + List_t xTasksWaitingToReceive; /**< List of tasks that are blocked waiting to read from this queue. Stored in priority order. */ - volatile UBaseType_t uxMessagesWaiting; /*< The number of items currently in the queue. */ - UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */ - UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */ + volatile UBaseType_t uxMessagesWaiting; /**< The number of items currently in the queue. */ + UBaseType_t uxLength; /**< The length of the queue defined as the number of items it will hold, not the number of bytes. 
*/ + UBaseType_t uxItemSize; /**< The size of each items that the queue will hold. */ - volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ - volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + volatile int8_t cRxLock; /**< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ + volatile int8_t cTxLock; /**< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */ #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) - uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */ + uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */ #endif #if ( configUSE_QUEUE_SETS == 1 ) @@ -264,14 +264,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * tasks than the number of tasks in the system. */ #define prvIncrementQueueTxLock( pxQueue, cTxLock ) \ - { \ + do { \ const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \ if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks ) \ { \ configASSERT( ( cTxLock ) != queueINT8_MAX ); \ ( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \ } \ - } + } while( 0 ) /* * Macro to increment cRxLock member of the queue data structure. It is @@ -279,14 +279,14 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, * tasks than the number of tasks in the system. */ #define prvIncrementQueueRxLock( pxQueue, cRxLock ) \ - { \ + do { \ const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks(); \ if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks ) \ { \ configASSERT( ( cRxLock ) != queueINT8_MAX ); \ ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \ } \ - } + } while( 0 ) /*-----------------------------------------------------------*/ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, @@ -1046,7 +1046,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const BaseType_t xCopyPosition ) { BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; Queue_t * const pxQueue = xQueue; configASSERT( pxQueue ); @@ -1074,7 +1074,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, * read, instead return a flag to say whether a context switch is required or * not (i.e. has a task with a higher priority than us been woken by this * post). 
*/ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) { @@ -1199,7 +1199,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -1209,7 +1209,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) { BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; Queue_t * const pxQueue = xQueue; /* Similar to xQueueGenericSendFromISR() but used with semaphores where the @@ -1245,7 +1245,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1365,7 +1365,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -1880,7 +1880,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) { BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; Queue_t * const pxQueue = xQueue; configASSERT( pxQueue ); @@ -1902,7 +1902,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1962,7 +1962,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -1972,7 +1972,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) { BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; int8_t * pcOriginalReadPosition; Queue_t * const pxQueue = xQueue; @@ -1996,7 +1996,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { /* Cannot block in an ISR, so check there is data available. 
*/ if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) @@ -2017,7 +2017,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } diff --git a/stream_buffer.c b/stream_buffer.c index b81f072fb1a..0d0b3350acd 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -70,7 +70,7 @@ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ - ( void ) xTaskResumeAll(); + ( void ) xTaskResumeAll() #endif /* sbRECEIVE_COMPLETED */ /* If user has provided a per-instance receive complete callback, then @@ -95,10 +95,10 @@ #ifndef sbRECEIVE_COMPLETED_FROM_ISR #define sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ pxHigherPriorityTaskWoken ) \ - { \ - UBaseType_t uxSavedInterruptStatus; \ + do { \ + portBASE_TYPE xSavedInterruptStatus; \ \ - uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \ + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ { \ @@ -109,8 +109,8 @@ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ - } + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); \ + } while( 0 ) #endif /* sbRECEIVE_COMPLETED_FROM_ISR */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) @@ -147,7 +147,7 @@ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ - ( void ) xTaskResumeAll(); + ( void ) xTaskResumeAll() #endif /* sbSEND_COMPLETED */ /* If user has provided a per-instance send completed callback, then @@ -155,7 +155,7 @@ */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define prvSEND_COMPLETED( pxStreamBuffer ) \ - { \ + do { \ if( ( pxStreamBuffer )->pxSendCompletedCallback != NULL ) \ { \ pxStreamBuffer->pxSendCompletedCallback( ( pxStreamBuffer ), pdFALSE, NULL ); \ @@ -164,7 +164,7 @@ { \ sbSEND_COMPLETED( ( pxStreamBuffer ) ); \ } \ - } + } while( 0 ) #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #define prvSEND_COMPLETED( pxStreamBuffer ) sbSEND_COMPLETED( ( pxStreamBuffer ) ) #endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ @@ -172,10 +172,10 @@ #ifndef sbSEND_COMPLETE_FROM_ISR #define sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ - { \ - UBaseType_t uxSavedInterruptStatus; \ + do { \ + portBASE_TYPE xSavedInterruptStatus; \ \ - uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); \ + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ { \ @@ -186,14 +186,14 @@ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ - } + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); \ + } while( 0 ) #endif /* sbSEND_COMPLETE_FROM_ISR */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ - { \ + do { \ if( ( pxStreamBuffer )->pxSendCompletedCallback != NULL ) \ { \ ( pxStreamBuffer )->pxSendCompletedCallback( ( pxStreamBuffer ), pdTRUE, ( pxHigherPriorityTaskWoken ) ); \ @@ -202,7 +202,7 @@ { \ sbSEND_COMPLETE_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ); \ } \ - } + } while( 0 ) #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #define prvSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ sbSEND_COMPLETE_FROM_ISR( ( 
pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ) @@ -436,13 +436,11 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); #if ( configASSERT_DEFINED == 1 ) - { + /* Sanity check that the size of the structure used to declare a * variable of type StaticStreamBuffer_t equals the size of the real * message buffer structure. */ - volatile size_t xSize = sizeof( StaticStreamBuffer_t ); - configASSERT( xSize == sizeof( StreamBuffer_t ) ); - } /*lint !e529 xSize is referenced is configASSERT() is defined. */ + configASSERT( sizeof( StaticStreamBuffer_t ) == sizeof( StreamBuffer_t ) ); #endif /* configASSERT_DEFINED */ if( ( pucStreamBufferStorageArea != NULL ) && ( pxStaticStreamBuffer != NULL ) ) @@ -1188,11 +1186,11 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( pxStreamBuffer ); - uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) { @@ -1208,7 +1206,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer xReturn = pdFALSE; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -1219,11 +1217,11 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( pxStreamBuffer ); - uxSavedInterruptStatus = ( UBaseType_t ) portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) { @@ -1239,7 +1237,7 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf xReturn = pdFALSE; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -1372,9 +1370,9 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, /* The value written just has to be identifiable when looking at the * memory. Don't use 0xA5 as that is the stack fill value and could * result in confusion as to what is actually being observed. */ - const BaseType_t xWriteValue = 0x55; - configASSERT( memset( pucBuffer, ( int ) xWriteValue, xBufferSizeBytes ) == pucBuffer ); - } /*lint !e529 !e438 xWriteValue is only used if configASSERT() is defined. */ + #define STREAM_BUFFER_BUFFER_WRITE_VALUE ( 0x55 ) + configASSERT( memset( pucBuffer, ( int ) STREAM_BUFFER_BUFFER_WRITE_VALUE, xBufferSizeBytes ) == pucBuffer ); + } #endif ( void ) memset( ( void * ) pxStreamBuffer, 0x00, sizeof( StreamBuffer_t ) ); /*lint !e9087 memset() requires void *. */ diff --git a/tasks.c b/tasks.c index 845af7daa7b..2b73b1b9703 100644 --- a/tasks.c +++ b/tasks.c @@ -134,7 +134,7 @@ /*-----------------------------------------------------------*/ #define taskSELECT_HIGHEST_PRIORITY_TASK() \ - { \ + do { \ UBaseType_t uxTopPriority = uxTopReadyPriority; \ \ /* Find the highest priority queue that contains ready tasks. 
*/ \ @@ -148,7 +148,7 @@ * the same priority get an equal share of the processor time. */ \ listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ uxTopReadyPriority = uxTopPriority; \ - } /* taskSELECT_HIGHEST_PRIORITY_TASK */ + } while( 0 ) /* taskSELECT_HIGHEST_PRIORITY_TASK */ /*-----------------------------------------------------------*/ @@ -170,14 +170,14 @@ /*-----------------------------------------------------------*/ #define taskSELECT_HIGHEST_PRIORITY_TASK() \ - { \ + do { \ UBaseType_t uxTopPriority; \ \ /* Find the highest priority list that contains ready tasks. */ \ portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \ configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \ listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \ - } /* taskSELECT_HIGHEST_PRIORITY_TASK() */ + } while( 0 ) /*-----------------------------------------------------------*/ @@ -185,12 +185,12 @@ * is being referenced from a ready list. If it is referenced from a delayed * or suspended list then it won't be in a ready list. */ #define taskRESET_READY_PRIORITY( uxPriority ) \ - { \ + do { \ if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \ { \ portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \ } \ - } + } while( 0 ) #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ @@ -199,7 +199,7 @@ /* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick * count overflows. */ #define taskSWITCH_DELAYED_LISTS() \ - { \ + do { \ List_t * pxTemp; \ \ /* The delayed tasks list should be empty when the lists are switched. */ \ @@ -210,7 +210,7 @@ pxOverflowDelayedTaskList = pxTemp; \ xNumOfOverflows++; \ prvResetNextTaskUnblockTime(); \ - } + } while( 0 ) /*-----------------------------------------------------------*/ @@ -218,11 +218,13 @@ * Place the task represented by pxTCB into the appropriate ready list for * the task. It is inserted at the end of the list. */ -#define prvAddTaskToReadyList( pxTCB ) \ - traceMOVED_TASK_TO_READY_STATE( pxTCB ); \ - taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \ - listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \ - tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ) +#define prvAddTaskToReadyList( pxTCB ) \ + do { \ + traceMOVED_TASK_TO_READY_STATE( pxTCB ); \ + taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \ + listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \ + tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ); \ + } while( 0 ) /*-----------------------------------------------------------*/ /* @@ -256,33 +258,33 @@ */ typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */ { - volatile StackType_t * pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */ + volatile StackType_t * pxTopOfStack; /**< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */ #if ( portUSING_MPU_WRAPPERS == 1 ) - xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */ + xMPU_SETTINGS xMPUSettings; /**< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. 
*/ #endif - ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */ - ListItem_t xEventListItem; /*< Used to reference a task from an event list. */ - UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */ - StackType_t * pxStack; /*< Points to the start of the stack. */ - char pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + ListItem_t xStateListItem; /**< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */ + ListItem_t xEventListItem; /**< Used to reference a task from an event list. */ + UBaseType_t uxPriority; /**< The priority of the task. 0 is the lowest priority. */ + StackType_t * pxStack; /**< Points to the start of the stack. */ + char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) - StackType_t * pxEndOfStack; /*< Points to the highest valid address for the stack. */ + StackType_t * pxEndOfStack; /**< Points to the highest valid address for the stack. */ #endif #if ( portCRITICAL_NESTING_IN_TCB == 1 ) - UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */ + UBaseType_t uxCriticalNesting; /**< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */ #endif #if ( configUSE_TRACE_FACILITY == 1 ) - UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */ - UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */ + UBaseType_t uxTCBNumber; /**< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */ + UBaseType_t uxTaskNumber; /**< Stores a number specifically for use by third party trace code. */ #endif #if ( configUSE_MUTEXES == 1 ) - UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */ + UBaseType_t uxBasePriority; /**< The priority last assigned to the task - used by the priority inheritance mechanism. */ UBaseType_t uxMutexesHeld; #endif @@ -295,11 +297,11 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to #endif #if ( configGENERATE_RUN_TIME_STATS == 1 ) - configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */ + configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /**< Stores the amount of time the task has spent in the Running state. */ #endif #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) - configTLS_BLOCK_TYPE xTLSBlock; /*< Memory block used as Thread Local Storage (TLS) Block for the task. */ + configTLS_BLOCK_TYPE xTLSBlock; /**< Memory block used as Thread Local Storage (TLS) Block for the task. 
*/ #endif #if ( configUSE_TASK_NOTIFICATIONS == 1 ) @@ -310,7 +312,7 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to /* See the comments in FreeRTOS.h with the definition of * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */ #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ - uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */ + uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */ #endif #if ( INCLUDE_xTaskAbortDelay == 1 ) @@ -334,23 +336,23 @@ portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL; * xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but * doing so breaks some kernel aware debuggers and debuggers that rely on removing * the static qualifier. */ -PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /*< Prioritised ready tasks. */ -PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */ -PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */ -PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */ -PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */ -PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */ +PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /**< Prioritised ready tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList1; /**< Delayed tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList2; /**< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */ +PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /**< Points to the delayed task list currently being used. */ +PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /**< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */ +PRIVILEGED_DATA static List_t xPendingReadyList; /**< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */ #if ( INCLUDE_vTaskDelete == 1 ) - PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. */ + PRIVILEGED_DATA static List_t xTasksWaitingTermination; /**< Tasks that have been deleted - but their memory not yet freed. */ PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U; #endif #if ( INCLUDE_vTaskSuspend == 1 ) - PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */ + PRIVILEGED_DATA static List_t xSuspendedTaskList; /**< Tasks that are currently suspended. 
*/ #endif @@ -370,7 +372,7 @@ PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE; PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ -PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ +PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /**< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ /* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists. * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority @@ -391,8 +393,8 @@ PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t /* Do not move these variables to function scope as doing so prevents the * code working with debuggers that need to remove the static qualifier. */ - PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */ - PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */ + PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime = 0UL; /**< Holds the value of a timer/counter the last time a task was switched in. */ + PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime = 0UL; /**< Holds the total amount of execution time as defined by the run time counter clock. */ #endif @@ -430,7 +432,7 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; * void prvIdleTask( void *pvParameters ); * */ -static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION; +static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) portNORETURN PRIVILEGED_FUNCTION; /* * Utility to free all memory allocated by the scheduler to hold a TCB, @@ -1469,7 +1471,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) { TCB_t const * pxTCB; - UBaseType_t uxReturn, uxSavedInterruptState; + UBaseType_t uxReturn; + portBASE_TYPE xSavedInterruptState; /* RTOS ports that support interrupt nesting have the concept of a * maximum system call (or maximum API call) interrupt priority. @@ -1489,14 +1492,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR(); { /* If null is passed in here then it is the priority of the calling * task that is being queried. 
*/ pxTCB = prvGetTCBFromHandle( xTask ); uxReturn = pxTCB->uxPriority; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptState ); return uxReturn; } @@ -1882,7 +1885,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { BaseType_t xYieldRequired = pdFALSE; TCB_t * const pxTCB = xTaskToResume; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( xTaskToResume ); @@ -1904,7 +1907,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { @@ -1945,7 +1948,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xYieldRequired; } @@ -2303,7 +2306,7 @@ TickType_t xTaskGetTickCount( void ) TickType_t xTaskGetTickCountFromISR( void ) { TickType_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; /* RTOS ports that support interrupt nesting have the concept of a maximum * system call (or maximum API call) interrupt priority. Interrupts that are @@ -2321,11 +2324,11 @@ TickType_t xTaskGetTickCountFromISR( void ) * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); { xReturn = xTickCount; } - portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -2535,7 +2538,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) ); #else - *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + *pulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE(); #endif } } @@ -2956,18 +2959,18 @@ BaseType_t xTaskIncrementTick( void ) { TCB_t * pxTCB; TaskHookFunction_t xReturn; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; /* If xTask is NULL then set the calling task's hook. */ pxTCB = prvGetTCBFromHandle( xTask ); /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. 
*/ - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { xReturn = pxTCB->pxTaskTag; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -3026,7 +3029,7 @@ void vTaskSwitchContext( void ) #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); #else - ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + ulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE(); #endif /* Add the amount of time the task has been running to the @@ -3426,7 +3429,8 @@ void vTaskMissedYield( void ) * void prvIdleTask( void *pvParameters ); * */ -static portTASK_FUNCTION( prvIdleTask, pvParameters ) + +portTASK_FUNCTION( prvIdleTask, pvParameters ) { /* Stop warnings. */ ( void ) pvParameters; @@ -4975,7 +4979,7 @@ TickType_t uxTaskResetEventItemValue( void ) TCB_t * pxTCB; uint8_t ucOriginalNotifyState; BaseType_t xReturn = pdPASS; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( xTaskToNotify ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); @@ -5000,7 +5004,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( pulPreviousNotificationValue != NULL ) { @@ -5094,7 +5098,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); return xReturn; } @@ -5110,7 +5114,7 @@ TickType_t uxTaskResetEventItemValue( void ) { TCB_t * pxTCB; uint8_t ucOriginalNotifyState; - UBaseType_t uxSavedInterruptStatus; + portBASE_TYPE xSavedInterruptStatus; configASSERT( xTaskToNotify ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); @@ -5135,7 +5139,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ]; pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED; @@ -5185,7 +5189,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); } #endif /* configUSE_TASK_NOTIFICATIONS */ @@ -5269,7 +5273,7 @@ TickType_t uxTaskResetEventItemValue( void ) { configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn; - ulTotalTime = portGET_RUN_TIME_COUNTER_VALUE(); + ulTotalTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE(); /* For percentage calculations. */ ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100; diff --git a/timers.c b/timers.c index 4894a309dc3..06a29279597 100644 --- a/timers.c +++ b/timers.c @@ -74,15 +74,15 @@ /* The definition of the timers themselves. */ typedef struct tmrTimerControl /* The old naming convention is used to prevent breaking kernel aware debuggers. */ { - const char * pcTimerName; /*<< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ - ListItem_t xTimerListItem; /*<< Standard linked list item as used by all kernel features for event management. 
*/ - TickType_t xTimerPeriodInTicks; /*<< How quickly and often the timer expires. */ - void * pvTimerID; /*<< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */ - TimerCallbackFunction_t pxCallbackFunction; /*<< The function that will be called when the timer expires. */ + const char * pcTimerName; /**< Text name. This is not used by the kernel, it is included simply to make debugging easier. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + ListItem_t xTimerListItem; /**< Standard linked list item as used by all kernel features for event management. */ + TickType_t xTimerPeriodInTicks; /**< How quickly and often the timer expires. */ + void * pvTimerID; /**< An ID to identify the timer. This allows the timer to be identified when the same callback is used for multiple timers. */ + TimerCallbackFunction_t pxCallbackFunction; /**< The function that will be called when the timer expires. */ #if ( configUSE_TRACE_FACILITY == 1 ) - UBaseType_t uxTimerNumber; /*<< An ID assigned by trace tools such as FreeRTOS+Trace */ + UBaseType_t uxTimerNumber; /**< An ID assigned by trace tools such as FreeRTOS+Trace */ #endif - uint8_t ucStatus; /*<< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */ + uint8_t ucStatus; /**< Holds bits to say if the timer was statically allocated or not, and if it is active or not. */ } xTIMER; /* The old xTIMER name is maintained above then typedefed to the new Timer_t @@ -96,8 +96,8 @@ * and xCallbackParametersType respectively. */ typedef struct tmrTimerParameters { - TickType_t xMessageValue; /*<< An optional value used by a subset of commands, for example, when changing the period of a timer. */ - Timer_t * pxTimer; /*<< The timer to which the command will be applied. */ + TickType_t xMessageValue; /**< An optional value used by a subset of commands, for example, when changing the period of a timer. */ + Timer_t * pxTimer; /**< The timer to which the command will be applied. */ } TimerParameter_t; @@ -112,7 +112,7 @@ * that is used to determine which message type is valid. */ typedef struct tmrTimerQueueMessage { - BaseType_t xMessageID; /*<< The command being sent to the timer service task. */ + BaseType_t xMessageID; /**< The command being sent to the timer service task. */ union { TimerParameter_t xTimerParameters; @@ -158,7 +158,7 @@ * task. Other tasks communicate with the timer service task using the * xTimerQueue queue. */ - static portTASK_FUNCTION_PROTO( prvTimerTask, pvParameters ) PRIVILEGED_FUNCTION; + static portTASK_FUNCTION_PROTO( prvTimerTask, pvParameters ) portNORETURN PRIVILEGED_FUNCTION; /* * Called by the timer service task to interpret and process a command it @@ -575,8 +575,6 @@ #if ( configUSE_DAEMON_TASK_STARTUP_HOOK == 1 ) { - extern void vApplicationDaemonTaskStartupHook( void ); - /* Allow the application writer to execute some code in the context of * this task at the point the task starts executing. This is useful if the * application includes initialisation code that would benefit from From cd87681789dd3317b1291a0b32800c64b8fd850f Mon Sep 17 00:00:00 2001 From: jacky309 Date: Mon, 27 Feb 2023 19:21:11 +0100 Subject: [PATCH 13/62] POSIX port fixes (#626) * Fix types in POSIX port Use TaskFunction_t and StackType_t as other ports do. 
* Fix portTICK_RATE_MICROSECONDS in POSIX port --------- Co-authored-by: Jacques GUILLOU Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --- portable/ThirdParty/GCC/Posix/port.c | 8 ++++---- portable/ThirdParty/GCC/Posix/portmacro.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/portable/ThirdParty/GCC/Posix/port.c b/portable/ThirdParty/GCC/Posix/port.c index 82aa27ba889..12076b9898d 100644 --- a/portable/ThirdParty/GCC/Posix/port.c +++ b/portable/ThirdParty/GCC/Posix/port.c @@ -73,7 +73,7 @@ typedef struct THREAD { pthread_t pthread; - pdTASK_CODE pxCode; + TaskFunction_t pxCode; void * pvParams; BaseType_t xDying; struct event * ev; @@ -127,9 +127,9 @@ void prvFatalError( const char * pcCall, /* * See header file for description. */ -portSTACK_TYPE * pxPortInitialiseStack( portSTACK_TYPE * pxTopOfStack, - portSTACK_TYPE * pxEndOfStack, - pdTASK_CODE pxCode, +portSTACK_TYPE * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, void * pvParameters ) { Thread_t * thread; diff --git a/portable/ThirdParty/GCC/Posix/portmacro.h b/portable/ThirdParty/GCC/Posix/portmacro.h index 2061c323642..68655861202 100644 --- a/portable/ThirdParty/GCC/Posix/portmacro.h +++ b/portable/ThirdParty/GCC/Posix/portmacro.h @@ -73,7 +73,7 @@ typedef unsigned long TickType_t; #define portSTACK_GROWTH ( -1 ) #define portHAS_STACK_OVERFLOW_CHECKING ( 1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) -#define portTICK_RATE_MICROSECONDS ( ( portTickType ) 1000000 / configTICK_RATE_HZ ) +#define portTICK_RATE_MICROSECONDS ( ( TickType_t ) 1000000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 /*-----------------------------------------------------------*/ From 5fdbb7fd2b6102bddfeb665f2d276c24a86c82df Mon Sep 17 00:00:00 2001 From: Devaraj Ranganna Date: Tue, 28 Feb 2023 07:28:59 +0000 Subject: [PATCH 14/62] Cortex-M35P: Add Cortex-M35P port (#631) * Cortex-M35P: Add Cortex-M35P port The Cortex-M35P support added to kernel. The port hasn't been validated yet with TF-M. Hence TF-M support is not included in this port. 
Signed-off-by: Devaraj Ranganna * Add portNORETURN to the newly added portmacro.h Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Devaraj Ranganna Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> --- CMakeLists.txt | 6 + portable/ARMv8M/copy_files.py | 30 +- .../portable/GCC/ARM_CM23/portmacro.h | 2 +- .../portable/GCC/ARM_CM23_NTZ/portmacro.h | 2 +- .../portable/GCC/ARM_CM33/portmacro.h | 2 +- .../portable/GCC/ARM_CM33_NTZ/portmacro.h | 2 +- .../portable/GCC/ARM_CM35P/portmacro.h | 67 + .../portable/GCC/ARM_CM55/portmacro.h | 2 +- .../portable/GCC/ARM_CM85/portmacro.h | 2 +- .../portable/IAR/ARM_CM35P/portmacro.h | 78 + portable/CMakeLists.txt | 36 + portable/GCC/ARM_CM23/non_secure/portasm.c | 2 +- portable/GCC/ARM_CM23/non_secure/portmacro.h | 2 +- .../GCC/ARM_CM23_NTZ/non_secure/portmacro.h | 2 +- portable/GCC/ARM_CM33/non_secure/portmacro.h | 2 +- .../GCC/ARM_CM33_NTZ/non_secure/portmacro.h | 2 +- portable/GCC/ARM_CM35P/non_secure/port.c | 1261 +++++++++++++++++ portable/GCC/ARM_CM35P/non_secure/portasm.c | 470 ++++++ portable/GCC/ARM_CM35P/non_secure/portasm.h | 114 ++ portable/GCC/ARM_CM35P/non_secure/portmacro.h | 67 + .../ARM_CM35P/non_secure/portmacrocommon.h | 313 ++++ .../GCC/ARM_CM35P/secure/secure_context.c | 351 +++++ .../GCC/ARM_CM35P/secure/secure_context.h | 135 ++ .../ARM_CM35P/secure/secure_context_port.c | 97 ++ portable/GCC/ARM_CM35P/secure/secure_heap.c | 454 ++++++ portable/GCC/ARM_CM35P/secure/secure_heap.h | 66 + portable/GCC/ARM_CM35P/secure/secure_init.c | 106 ++ portable/GCC/ARM_CM35P/secure/secure_init.h | 54 + .../GCC/ARM_CM35P/secure/secure_port_macros.h | 140 ++ portable/GCC/ARM_CM35P_NTZ/non_secure/port.c | 1261 +++++++++++++++++ .../GCC/ARM_CM35P_NTZ/non_secure/portasm.c | 365 +++++ .../GCC/ARM_CM35P_NTZ/non_secure/portasm.h | 114 ++ .../GCC/ARM_CM35P_NTZ/non_secure/portmacro.h | 67 + .../non_secure/portmacrocommon.h | 313 ++++ portable/GCC/ARM_CM55/non_secure/portmacro.h | 2 +- .../GCC/ARM_CM55_NTZ/non_secure/portmacro.h | 2 +- portable/GCC/ARM_CM85/non_secure/portmacro.h | 2 +- .../GCC/ARM_CM85_NTZ/non_secure/portmacro.h | 2 +- portable/IAR/ARM_CM35P/non_secure/port.c | 1261 +++++++++++++++++ portable/IAR/ARM_CM35P/non_secure/portasm.h | 114 ++ portable/IAR/ARM_CM35P/non_secure/portasm.s | 353 +++++ portable/IAR/ARM_CM35P/non_secure/portmacro.h | 78 + .../ARM_CM35P/non_secure/portmacrocommon.h | 313 ++++ .../IAR/ARM_CM35P/secure/secure_context.c | 351 +++++ .../IAR/ARM_CM35P/secure/secure_context.h | 135 ++ .../secure/secure_context_port_asm.s | 86 ++ portable/IAR/ARM_CM35P/secure/secure_heap.c | 454 ++++++ portable/IAR/ARM_CM35P/secure/secure_heap.h | 66 + portable/IAR/ARM_CM35P/secure/secure_init.c | 106 ++ portable/IAR/ARM_CM35P/secure/secure_init.h | 54 + .../IAR/ARM_CM35P/secure/secure_port_macros.h | 140 ++ portable/IAR/ARM_CM35P_NTZ/non_secure/port.c | 1261 +++++++++++++++++ .../IAR/ARM_CM35P_NTZ/non_secure/portasm.h | 114 ++ .../IAR/ARM_CM35P_NTZ/non_secure/portasm.s | 262 ++++ .../IAR/ARM_CM35P_NTZ/non_secure/portmacro.h | 78 + .../non_secure/portmacrocommon.h | 313 ++++ 56 files changed, 11509 insertions(+), 25 deletions(-) create mode 100644 portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h create mode 100644 portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h create mode 100644 portable/GCC/ARM_CM35P/non_secure/port.c create mode 100644 
portable/GCC/ARM_CM35P/non_secure/portasm.c create mode 100644 portable/GCC/ARM_CM35P/non_secure/portasm.h create mode 100644 portable/GCC/ARM_CM35P/non_secure/portmacro.h create mode 100644 portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h create mode 100644 portable/GCC/ARM_CM35P/secure/secure_context.c create mode 100644 portable/GCC/ARM_CM35P/secure/secure_context.h create mode 100644 portable/GCC/ARM_CM35P/secure/secure_context_port.c create mode 100644 portable/GCC/ARM_CM35P/secure/secure_heap.c create mode 100644 portable/GCC/ARM_CM35P/secure/secure_heap.h create mode 100644 portable/GCC/ARM_CM35P/secure/secure_init.c create mode 100644 portable/GCC/ARM_CM35P/secure/secure_init.h create mode 100644 portable/GCC/ARM_CM35P/secure/secure_port_macros.h create mode 100644 portable/GCC/ARM_CM35P_NTZ/non_secure/port.c create mode 100644 portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c create mode 100644 portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.h create mode 100644 portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h create mode 100644 portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h create mode 100644 portable/IAR/ARM_CM35P/non_secure/port.c create mode 100644 portable/IAR/ARM_CM35P/non_secure/portasm.h create mode 100644 portable/IAR/ARM_CM35P/non_secure/portasm.s create mode 100644 portable/IAR/ARM_CM35P/non_secure/portmacro.h create mode 100644 portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h create mode 100644 portable/IAR/ARM_CM35P/secure/secure_context.c create mode 100644 portable/IAR/ARM_CM35P/secure/secure_context.h create mode 100644 portable/IAR/ARM_CM35P/secure/secure_context_port_asm.s create mode 100644 portable/IAR/ARM_CM35P/secure/secure_heap.c create mode 100644 portable/IAR/ARM_CM35P/secure/secure_heap.h create mode 100644 portable/IAR/ARM_CM35P/secure/secure_init.c create mode 100644 portable/IAR/ARM_CM35P/secure/secure_init.h create mode 100644 portable/IAR/ARM_CM35P/secure/secure_port_macros.h create mode 100644 portable/IAR/ARM_CM35P_NTZ/non_secure/port.c create mode 100644 portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.h create mode 100644 portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s create mode 100644 portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h create mode 100644 portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 3224a97653b..aead9a0d841 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -74,6 +74,9 @@ if(NOT FREERTOS_PORT) " GCC_ARM_CM33_SECURE - Compiler: GCC Target: ARM Cortex-M33 secure\n" " GCC_ARM_CM33_NTZ_NONSECURE - Compiler: GCC Target: ARM Cortex-M33 non-trustzone non-secure\n" " GCC_ARM_CM33_TFM - Compiler: GCC Target: ARM Cortex-M33 non-secure for TF-M\n" + " GCC_ARM_CM35P_NONSECURE - Compiler: GCC Target: ARM Cortex-M35P non-secure\n" + " GCC_ARM_CM35P_SECURE - Compiler: GCC Target: ARM Cortex-M35P secure\n" + " GCC_ARM_CM35P_NTZ_NONSECURE - Compiler: GCC Target: ARM Cortex-M35P non-trustzone non-secure\n" " GCC_ARM_CM55_NONSECURE - Compiler: GCC Target: ARM Cortex-M55 non-secure\n" " GCC_ARM_CM55_SECURE - Compiler: GCC Target: ARM Cortex-M55 secure\n" " GCC_ARM_CM55_NTZ_NONSECURE - Compiler: GCC Target: ARM Cortex-M55 non-trustzone non-secure\n" @@ -134,6 +137,9 @@ if(NOT FREERTOS_PORT) " IAR_ARM_CM33_NONSECURE - Compiler: IAR Target: ARM Cortex-M33 non-secure\n" " IAR_ARM_CM33_SECURE - Compiler: IAR Target: ARM Cortex-M33 secure\n" " IAR_ARM_CM33_NTZ_NONSECURE - Compiler: IAR Target: ARM Cortex-M33 non-trustzone non-secure\n" + " IAR_ARM_CM35P_NONSECURE - Compiler: IAR 
Target: ARM Cortex-M35P non-secure\n" + " IAR_ARM_CM35P_SECURE - Compiler: IAR Target: ARM Cortex-M35P secure\n" + " IAR_ARM_CM35P_NTZ_NONSECURE - Compiler: IAR Target: ARM Cortex-M35P non-trustzone non-secure\n" " IAR_ARM_CM55_NONSECURE - Compiler: IAR Target: ARM Cortex-M55 non-secure\n" " IAR_ARM_CM55_SECURE - Compiler: IAR Target: ARM Cortex-M55 secure\n" " IAR_ARM_CM55_NTZ_NONSECURE - Compiler: IAR Target: ARM Cortex-M55 non-trustzone non-secure\n" diff --git a/portable/ARMv8M/copy_files.py b/portable/ARMv8M/copy_files.py index ebe8009703c..d064969809c 100644 --- a/portable/ARMv8M/copy_files.py +++ b/portable/ARMv8M/copy_files.py @@ -33,8 +33,8 @@ _FREERTOS_PORTABLE_DIRECTORY_ = os.path.dirname(_THIS_FILE_DIRECTORY_) _COMPILERS_ = ['GCC', 'IAR'] -_ARCH_NS_ = ['ARM_CM85', 'ARM_CM85_NTZ', 'ARM_CM55', 'ARM_CM55_NTZ', 'ARM_CM33', 'ARM_CM33_NTZ', 'ARM_CM23', 'ARM_CM23_NTZ'] -_ARCH_S_ = ['ARM_CM85', 'ARM_CM55', 'ARM_CM33', 'ARM_CM23'] +_ARCH_NS_ = ['ARM_CM85', 'ARM_CM85_NTZ', 'ARM_CM55', 'ARM_CM55_NTZ', 'ARM_CM35P', 'ARM_CM35P_NTZ', 'ARM_CM33', 'ARM_CM33_NTZ', 'ARM_CM23', 'ARM_CM23_NTZ'] +_ARCH_S_ = ['ARM_CM85', 'ARM_CM55', 'ARM_CM35P', 'ARM_CM33', 'ARM_CM23'] # Files to be compiled in the Secure Project _SECURE_COMMON_FILE_PATHS_ = [ @@ -46,16 +46,18 @@ _SECURE_PORTABLE_FILE_PATHS_ = { 'GCC':{ - 'ARM_CM23':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM23')], - 'ARM_CM33':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')], - 'ARM_CM55':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')], - 'ARM_CM85':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')] + 'ARM_CM23' :[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM23')], + 'ARM_CM33' :[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')], + 'ARM_CM35P':[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')], + 'ARM_CM55' :[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')], + 'ARM_CM85' :[os.path.join('secure', 'context', 'portable', 'GCC', 'ARM_CM33')] }, 'IAR':{ - 'ARM_CM23':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM23')], - 'ARM_CM33':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')], - 'ARM_CM55':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')], - 'ARM_CM85':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')] + 'ARM_CM23' :[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM23')], + 'ARM_CM33' :[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')], + 'ARM_CM35P':[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')], + 'ARM_CM55' :[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')], + 'ARM_CM85' :[os.path.join('secure', 'context', 'portable', 'IAR', 'ARM_CM33')] } } @@ -70,6 +72,10 @@ 'ARM_CM23_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM23_NTZ')], 'ARM_CM33' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33')], 'ARM_CM33_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ')], + 'ARM_CM35P' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM35P', 'portmacro.h')], + 'ARM_CM35P_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM35P', 'portmacro.h')], 'ARM_CM55' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM55', 'portmacro.h')], 
'ARM_CM55_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'), @@ -84,6 +90,10 @@ 'ARM_CM23_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM23_NTZ')], 'ARM_CM33' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33')], 'ARM_CM33_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ')], + 'ARM_CM35P' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM35P', 'portmacro.h')], + 'ARM_CM35P_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM35P', 'portmacro.h')], 'ARM_CM55' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM55', 'portmacro.h')], 'ARM_CM55_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'portasm.s'), diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h index 2891a6c7e08..c6dad99857c 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h @@ -50,7 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h index 2891a6c7e08..c6dad99857c 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h @@ -50,7 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h index 53c1020008d..4fe8c59147a 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h @@ -50,7 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h index 53c1020008d..4fe8c59147a 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h @@ -50,7 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h new file mode 
100644 index 00000000000..33bfb283461 --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h @@ -0,0 +1,67 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M35P" +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. 
+ */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h index 35c160f3407..adb47d8420f 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h @@ -55,7 +55,7 @@ */ #define portARCH_NAME "Cortex-M55" #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h index 69d32d7f57a..fec6923394c 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h @@ -55,7 +55,7 @@ */ #define portARCH_NAME "Cortex-M85" #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h new file mode 100644 index 00000000000..a0efc1f9dcf --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h @@ -0,0 +1,78 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +/** + * Architecture specifics. 
+ */ +#define portARCH_NAME "Cortex-M35P" +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +#if( configTOTAL_MPU_REGIONS == 16 ) + #error 16 MPU regions are not yet supported for this port. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in + * the source code because to do so would cause other compilers to generate + * warnings. */ +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/CMakeLists.txt b/portable/CMakeLists.txt index ae9de9cfe1b..2824df05dc0 100644 --- a/portable/CMakeLists.txt +++ b/portable/CMakeLists.txt @@ -123,6 +123,20 @@ add_library(freertos_kernel_port STATIC GCC/ARM_CM33_NTZ/non_secure/portasm.c ThirdParty/GCC/ARM_TFM/os_wrapper_freertos.c> + $<$: + GCC/ARM_CM35P/non_secure/port.c + GCC/ARM_CM35P/non_secure/portasm.c> + + $<$: + GCC/ARM_CM35P/secure/secure_context_port.c + GCC/ARM_CM35P/secure/secure_context.c + GCC/ARM_CM35P/secure/secure_heap.c + GCC/ARM_CM35P/secure/secure_init.c> + + $<$: + GCC/ARM_CM35P_NTZ/non_secure/port.c + GCC/ARM_CM35P_NTZ/non_secure/portasm.c> + # ARMv8.1-M ports for GCC $<$: GCC/ARM_CM55/non_secure/port.c @@ -397,6 +411,20 @@ add_library(freertos_kernel_port STATIC IAR/ARM_CM33_NTZ/non_secure/port.c IAR/ARM_CM33_NTZ/non_secure/portasm.s> + $<$: + IAR/ARM_CM35P/non_secure/port.c + IAR/ARM_CM35P/non_secure/portasm.s> + + $<$: + IAR/ARM_CM35P/secure/secure_context_port_asm.s + IAR/ARM_CM35P/secure/secure_context.c + IAR/ARM_CM35P/secure/secure_heap.c + IAR/ARM_CM35P/secure/secure_init.c> + + $<$: + IAR/ARM_CM35P_NTZ/non_secure/port.c + IAR/ARM_CM35P_NTZ/non_secure/portasm.s> + # ARMv8.1-M ports for IAR EWARM $<$: IAR/ARM_CM55/non_secure/port.c @@ -738,6 +766,10 @@ target_include_directories(freertos_kernel_port PUBLIC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM33_NTZ/non_secure> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM33_NTZ/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM35P/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM35P/secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM35P_NTZ/non_secure> + # ARMv8.1-M ports for GCC $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM55/non_secure> $<$:${CMAKE_CURRENT_LIST_DIR}/GCC/ARM_CM55/secure> @@ -860,6 +892,10 @@ target_include_directories(freertos_kernel_port PUBLIC $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM33/secure> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM33_NTZ/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM35P/non_secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM35P/secure> + $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM35P_NTZ/non_secure> + # ARMv8.1-M ports for IAR EWARM $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM55/non_secure> $<$:${CMAKE_CURRENT_LIST_DIR}/IAR/ARM_CM55/secure> diff --git a/portable/GCC/ARM_CM23/non_secure/portasm.c b/portable/GCC/ARM_CM23/non_secure/portasm.c index 5435439c5fe..44f159af1fa 100644 --- a/portable/GCC/ARM_CM23/non_secure/portasm.c +++ b/portable/GCC/ARM_CM23/non_secure/portasm.c @@ -367,7 +367,7 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r3, xSecureContextConst \n"/* Read the location 
of xSecureContext i.e. &( xSecureContext ). */ " str r0, [r3] \n"/* Restore the task's xSecureContext. */ " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " push {r2, r4} \n" " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacro.h b/portable/GCC/ARM_CM23/non_secure/portmacro.h index 2891a6c7e08..c6dad99857c 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacro.h @@ -50,7 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h index 2891a6c7e08..c6dad99857c 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h @@ -50,7 +50,7 @@ */ #define portARCH_NAME "Cortex-M23" #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ #if( configTOTAL_MPU_REGIONS == 16 ) diff --git a/portable/GCC/ARM_CM33/non_secure/portmacro.h b/portable/GCC/ARM_CM33/non_secure/portmacro.h index 53c1020008d..4fe8c59147a 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacro.h @@ -50,7 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h index 53c1020008d..4fe8c59147a 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h @@ -50,7 +50,7 @@ */ #define portARCH_NAME "Cortex-M33" #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM35P/non_secure/port.c b/portable/GCC/ARM_CM35P/non_secure/port.c new file mode 100644 index 00000000000..9976daee49a --- /dev/null +++ b/portable/GCC/ARM_CM35P/non_secure/port.c @@ -0,0 +1,1261 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/* Portasm includes. */ +#include "portasm.h" + +#if ( configENABLE_TRUSTZONE == 1 ) + /* Secure components includes. */ + #include "secure_context.h" + #include "secure_init.h" +#endif /* configENABLE_TRUSTZONE */ + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/** + * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only + * i.e. the processor boots as secure and never jumps to the non-secure side. + * The Trust Zone support in the port must be disabled in order to run FreeRTOS + * on the secure side. The following are the valid configuration seetings: + * + * 1. Run FreeRTOS on the Secure Side: + * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0 + * + * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1 + * + * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0 + */ +#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) ) + #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the NVIC. 
+ */ +#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) +#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the SCB. + */ +#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 ) +#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the FPU. + */ +#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */ +#define portCPACR_CP10_VALUE ( 3UL ) +#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE +#define portCPACR_CP10_POS ( 20UL ) +#define portCPACR_CP11_POS ( 22UL ) + +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. + */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. 
*/ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 94UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. + */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. 
+ * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. + */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. + */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. 
+ */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. + */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be unsuspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Re-enable interrupts - see comments above the cpsid instruction + * above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. 
*/ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. 
*/ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ + vTaskStepTick( ulCompleteTickPeriods ); + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. 
*/ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. 
*/ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. */ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. */ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. 
*/ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. 
*/ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. */ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. 
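
/* For reference, the frame that pxPortInitialiseStack() above lays down for
 * the configENABLE_MPU == 0, configENABLE_TRUSTZONE == 1 build, listed from
 * the returned top of stack upwards.  This is only a host-side illustration of
 * the slot ordering implied by the code; the actual values come from the
 * function parameters and the portINITIAL_* constants. */
#include <stdio.h>

int main( void )
{
    static const char * const pcSlots[] =
    {
        "xSecureContext (portNO_SECURE_CONTEXT)",
        "PSPLIM (pxEndOfStack)",
        "EXC_RETURN (portINITIAL_EXC_RETURN)",
        "R4", "R5", "R6", "R7", "R8", "R9", "R10", "R11",
        "R0 (pvParameters)", "R1", "R2", "R3", "R12",
        "LR (portTASK_RETURN_ADDRESS)",
        "PC (pxCode)",
        "xPSR (portINITIAL_XPSR)"
    };
    unsigned int i;

    for( i = 0; i < sizeof( pcSlots ) / sizeof( pcSlots[ 0 ] ); i++ )
    {
        printf( "[ pxTopOfStack + %2u words ] %s\n", i, pcSlots[ i ] );
    }

    return 0;
}
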
*/ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. */ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. */ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. 
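
/* A self-contained sketch of the RBAR/RLAR arithmetic used by
 * vPortStoreTaskMPUSettings() above: 32-byte granularity, read/write,
 * execute-never, normal memory via MAIR attribute index 0.  The masks and bit
 * positions below mirror the ARMv8-M MPU register layout; the authoritative
 * definitions are the portMPU_* macros in portmacrocommon.h, and the stack
 * base/size used here are arbitrary example values. */
#include <stdint.h>
#include <stdio.h>

#define RBAR_ADDRESS_MASK     0xFFFFFFE0UL   /* BASE field, bits [31:5]. */
#define RLAR_ADDRESS_MASK     0xFFFFFFE0UL   /* LIMIT field, bits [31:5]. */
#define REGION_NON_SHAREABLE  ( 0UL << 3UL )
#define REGION_READ_WRITE     ( 1UL << 1UL )
#define REGION_EXECUTE_NEVER  ( 1UL )
#define RLAR_ATTR_INDEX0      ( 0UL << 1UL )
#define RLAR_REGION_ENABLE    ( 1UL )

int main( void )
{
    /* Hypothetical 512 byte task stack placed at 0x20001000. */
    uint32_t ulStart = 0x20001000UL;
    uint32_t ulEnd = ulStart + 512UL - 1UL;

    uint32_t ulRBAR = ( ulStart & RBAR_ADDRESS_MASK ) |
                      REGION_NON_SHAREABLE | REGION_READ_WRITE | REGION_EXECUTE_NEVER;
    uint32_t ulRLAR = ( ulEnd & RLAR_ADDRESS_MASK ) |
                      RLAR_ATTR_INDEX0 | RLAR_REGION_ENABLE;

    printf( "RBAR = 0x%08lX, RLAR = 0x%08lX\n",
            ( unsigned long ) ulRBAR, ( unsigned long ) ulRLAR );
    return 0;
}
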
*/ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P/non_secure/portasm.c b/portable/GCC/ARM_CM35P/non_secure/portasm.c new file mode 100644 index 00000000000..9f9b2e68d39 --- /dev/null +++ b/portable/GCC/ARM_CM35P/non_secure/portasm.c @@ -0,0 +1,470 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE ensures that PRIVILEGED_FUNCTION + * is defined correctly and privileged functions are placed in correct sections. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Portasm includes. */ +#include "portasm.h" + +/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the + * header files. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ + " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + " str r4, [r2] \n"/* Disable MPU. */ + " \n" + " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ + " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r4, [r2] \n"/* Program MAIR0. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #4 \n"/* r4 = 4. */ + " str r4, [r2] \n"/* Program RNR = 4. */ + " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #8 \n"/* r4 = 8. */ + " str r4, [r2] \n"/* Program RNR = 8. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #12 \n"/* r4 = 12. */ + " str r4, [r2] \n"/* Program RNR = 12. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r2] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. 
*/ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ + " ldr r5, xSecureContextConst2 \n" + " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ + " msr control, r3 \n"/* Set this task's CONTROL value. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r4 \n"/* Finally, branch to EXC_RETURN. */ + #else /* configENABLE_MPU */ + " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n"/* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ + " \n" + " .align 4 \n" + "pxCurrentTCBConst2: .word pxCurrentTCB \n" + "xSecureContextConst2: .word xSecureContext \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst2: .word 0xe000ed94 \n" + "xMAIR0Const2: .word 0xe000edc0 \n" + "xRNRConst2: .word 0xe000ed98 \n" + "xRBARConst2: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* r0 = CONTROL. */ + " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + " ite ne \n" + " movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + " moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */ + " bx lr \n"/* Return. */ + " \n" + " .align 4 \n" + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* Read the CONTROL register. */ + " bic r0, #1 \n"/* Clear the bit 0. */ + " msr control, r0 \n"/* Write back the new CONTROL value. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vResetPrivilege( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* r0 = CONTROL. */ + " orr r0, #1 \n"/* r0 = r0 | 1. */ + " msr control, r0 \n"/* CONTROL = r0. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. 
*/ + " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ + " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ + " msr msp, r0 \n"/* Set the MSP back to the start of the stack. */ + " cpsie i \n"/* Globally enable interrupts. */ + " cpsie f \n" + " dsb \n" + " isb \n" + " svc %0 \n"/* System call to start the first task. */ + " nop \n" + " \n" + " .align 4 \n" + "xVTORConst: .word 0xe000ed08 \n" + ::"i" ( portSVC_START_SCHEDULER ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */ + " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " msr basepri, r0 \n"/* basepri = ulMask. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. */ + ::: "memory" + ); +} +/*-----------------------------------------------------------*/ + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n"/* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + " mrs r2, psp \n"/* Read PSP in r2. */ + " \n" + " cbz r0, save_ns_context \n"/* No secure context to save. */ + " push {r0-r2, r14} \n" + " bl SecureContext_SaveContext \n"/* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r3} \n"/* LR is now in r3. */ + " mov lr, r3 \n"/* LR = r3. */ + " lsls r1, r3, #25 \n"/* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " \n" + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/ + #if ( configENABLE_MPU == 1 ) + " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mrs r3, control \n"/* r3 = CONTROL. */ + " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + #else /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. 
*/ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ + #endif /* configENABLE_MPU */ + " b select_next_task \n" + " \n" + " save_ns_context: \n" + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + #if ( configENABLE_MPU == 1 ) + " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mrs r3, control \n"/* r3 = CONTROL. */ + " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ + " subs r2, r2, #16 \n"/* r2 = r2 - 16. */ + " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + #else /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ + #endif /* configENABLE_MPU */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n"/* r0 = 0. */ + " msr basepri, r0 \n"/* Enable interrupts. */ + " \n" + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + " str r4, [r3] \n"/* Disable MPU. */ + " \n" + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ + " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ + " str r4, [r3] \n"/* Program MAIR0. */ + " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #4 \n"/* r4 = 4. */ + " str r4, [r3] \n"/* Program RNR = 4. */ + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. 
*/ + " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #8 \n"/* r4 = 8. */ + " str r4, [r3] \n"/* Program RNR = 8. */ + " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ + " movs r4, #12 \n"/* r4 = 12. */ + " str r4, [r3] \n"/* Program RNR = 12. */ + " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r3] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" + #else /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. 
Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" + #endif /* configENABLE_MPU */ + " \n" + " restore_ns_context: \n" + " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vldmiaeq r2!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst: .word pxCurrentTCB \n" + "xSecureContextConst: .word xSecureContext \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst: .word 0xe000ed94 \n" + "xMAIR0Const: .word 0xe000edc0 \n" + "xRNRConst: .word 0xe000ed98 \n" + "xRBARConst: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} +/*-----------------------------------------------------------*/ + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " tst lr, #4 \n" + " ite eq \n" + " mrseq r0, msp \n" + " mrsne r0, psp \n" + " ldr r1, svchandler_address_const \n" + " bx r1 \n" + " \n" + " .align 4 \n" + "svchandler_address_const: .word vPortSVCHandler_C \n" + ); +} +/*-----------------------------------------------------------*/ + +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " svc %0 \n"/* Secure context is allocated in the supervisor call. */ + " bx lr \n"/* Return. */ + ::"i" ( portSVC_ALLOCATE_SECURE_CONTEXT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, [r0] \n"/* The first item in the TCB is the top of the stack. */ + " ldr r1, [r2] \n"/* The first item on the stack is the task's xSecureContext. */ + " cmp r1, #0 \n"/* Raise svc if task's xSecureContext is not NULL. */ + " it ne \n" + " svcne %0 \n"/* Secure context is freed in the supervisor call. */ + " bx lr \n"/* Return. */ + ::"i" ( portSVC_FREE_SECURE_CONTEXT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P/non_secure/portasm.h b/portable/GCC/ARM_CM35P/non_secure/portasm.h new file mode 100644 index 00000000000..ecd86b97fd1 --- /dev/null +++ b/portable/GCC/ARM_CM35P/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. 
+ * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacro.h b/portable/GCC/ARM_CM35P/non_secure/portmacro.h new file mode 100644 index 00000000000..33bfb283461 --- /dev/null +++ b/portable/GCC/ARM_CM35P/non_secure/portmacro.h @@ -0,0 +1,67 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M35P" +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..ca7e9225c05 --- /dev/null +++ b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h @@ -0,0 +1,313 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. 
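
/* Because of the #error checks above, every FreeRTOSConfig.h used with this
 * port must take an explicit position on the FPU, the MPU and TrustZone.  One
 * plausible combination, shown purely as an example: */
#define configENABLE_FPU          1    /* Save and restore the FP context. */
#define configENABLE_MPU          0    /* No per-task memory protection. */
#define configENABLE_TRUSTZONE    1    /* Tasks may call into the secure side. */
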
+ */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. 
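
/* How the two attribute values above are combined into MAIR0 by
 * vPortStoreTaskMPUSettings(): each MAIR attribute occupies one byte, index 0
 * in bits [7:0] and index 1 in bits [15:8], which is what the
 * portMPU_MAIR_ATTR0/ATTR1 position and mask macros express.  A host-side
 * check of that composition: */
#include <assert.h>
#include <stdint.h>

int main( void )
{
    uint32_t ulMAIR0;

    ulMAIR0 = ( 0xFFUL << 0UL );   /* Attr0: normal, write-back, read/write allocate. */
    ulMAIR0 |= ( 0x04UL << 8UL );  /* Attr1: Device-nGnRE. */

    assert( ulMAIR0 == 0x000004FFUL );
    return 0;
}
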
*/ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. 
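
/* As the portALLOCATE_SECURE_CONTEXT() notes above say, a task starts without
 * a secure context and must request one before its first secure call, while
 * portCLEAN_UP_TCB() releases it again when the task is deleted.  A sketch of
 * the expected call pattern; the entry point NSCFunction() and the 256-byte
 * secure stack size are made up for illustration: */
#include "FreeRTOS.h"
#include "task.h"

extern void NSCFunction( void );   /* Hypothetical non-secure-callable function. */

static void prvSecureCallingTask( void * pvParameters )
{
    ( void ) pvParameters;

    /* Give this task a secure-side stack before the first secure call. */
    portALLOCATE_SECURE_CONTEXT( 256 );

    for( ; ; )
    {
        NSCFunction();                         /* Executes on the secure side. */
        vTaskDelay( pdMS_TO_TICKS( 100 ) );    /* Back on the non-secure side. */
    }
}
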
+ * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM35P/secure/secure_context.c b/portable/GCC/ARM_CM35P/secure/secure_context.c new file mode 100644 index 00000000000..0730d574dd0 --- /dev/null +++ b/portable/GCC/ARM_CM35P/secure/secure_context.c @@ -0,0 +1,351 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Secure context includes. */ +#include "secure_context.h" + +/* Secure heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief CONTROL value for privileged tasks. + * + * Bit[0] - 0 --> Thread mode is privileged. + * Bit[1] - 1 --> Thread mode uses PSP. + */ +#define securecontextCONTROL_VALUE_PRIVILEGED 0x02 + +/** + * @brief CONTROL value for un-privileged tasks. + * + * Bit[0] - 1 --> Thread mode is un-privileged. + * Bit[1] - 1 --> Thread mode uses PSP. 
+ */ +#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03 + +/** + * @brief Size of stack seal values in bytes. + */ +#define securecontextSTACK_SEAL_SIZE 8 + +/** + * @brief Stack seal value as recommended by ARM. + */ +#define securecontextSTACK_SEAL_VALUE 0xFEF5EDA5 + +/** + * @brief Maximum number of secure contexts. + */ +#ifndef secureconfigMAX_SECURE_CONTEXTS + #define secureconfigMAX_SECURE_CONTEXTS 8UL +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Pre-allocated array of secure contexts. + */ +SecureContext_t xSecureContexts[ secureconfigMAX_SECURE_CONTEXTS ]; +/*-----------------------------------------------------------*/ + +/** + * @brief Get a free secure context for a task from the secure context pool (xSecureContexts). + * + * This function ensures that only one secure context is allocated for a task. + * + * @param[in] pvTaskHandle The task handle for which the secure context is allocated. + * + * @return Index of a free secure context in the xSecureContexts array. + */ +static uint32_t ulGetSecureContext( void * pvTaskHandle ); + +/** + * @brief Return the secure context to the secure context pool (xSecureContexts). + * + * @param[in] ulSecureContextIndex Index of the context in the xSecureContexts array. + */ +static void vReturnSecureContext( uint32_t ulSecureContextIndex ); + +/* These are implemented in assembly. */ +extern void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ); +extern void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ); +/*-----------------------------------------------------------*/ + +static uint32_t ulGetSecureContext( void * pvTaskHandle ) +{ + /* Start with invalid index. */ + uint32_t i, ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + + for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ ) + { + if( ( xSecureContexts[ i ].pucCurrentStackPointer == NULL ) && + ( xSecureContexts[ i ].pucStackLimit == NULL ) && + ( xSecureContexts[ i ].pucStackStart == NULL ) && + ( xSecureContexts[ i ].pvTaskHandle == NULL ) && + ( ulSecureContextIndex == secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = i; + } + else if( xSecureContexts[ i ].pvTaskHandle == pvTaskHandle ) + { + /* A task can only have one secure context. Do not allocate a second + * context for the same task. */ + ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + break; + } + } + + return ulSecureContextIndex; +} +/*-----------------------------------------------------------*/ + +static void vReturnSecureContext( uint32_t ulSecureContextIndex ) +{ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = NULL; + xSecureContexts[ ulSecureContextIndex ].pucStackLimit = NULL; + xSecureContexts[ ulSecureContextIndex ].pucStackStart = NULL; + xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = NULL; +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_Init( void ) +{ + uint32_t ulIPSR, i; + static uint32_t ulSecureContextsInitialized = 0; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ( ulIPSR != 0 ) && ( ulSecureContextsInitialized == 0 ) ) + { + /* Ensure to initialize secure contexts only once. */ + ulSecureContextsInitialized = 1; + + /* No stack for thread mode until a task's context is loaded. 
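
/* A host-side model of the slot search in ulGetSecureContext() above,
 * simplified to a single owner field per slot: take the first completely free
 * slot, but refuse to hand out a second slot to a task handle that already
 * owns one. */
#include <assert.h>
#include <stddef.h>

#define MAX_CTX    8U

static void * pvOwners[ MAX_CTX ];   /* NULL means the slot is free. */

static unsigned int uxGetSlot( void * pvTaskHandle )
{
    unsigned int i, uxSlot = MAX_CTX;   /* MAX_CTX means "no slot found". */

    for( i = 0; i < MAX_CTX; i++ )
    {
        if( ( pvOwners[ i ] == NULL ) && ( uxSlot == MAX_CTX ) )
        {
            uxSlot = i;             /* Remember the first free slot. */
        }
        else if( pvOwners[ i ] == pvTaskHandle )
        {
            uxSlot = MAX_CTX;       /* The task already has a context. */
            break;
        }
    }

    return uxSlot;
}

int main( void )
{
    int xTaskA, xTaskB;

    assert( uxGetSlot( &xTaskA ) == 0U );
    pvOwners[ 0 ] = &xTaskA;
    assert( uxGetSlot( &xTaskA ) == MAX_CTX );   /* Second request is rejected. */
    assert( uxGetSlot( &xTaskB ) == 1U );
    return 0;
}
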
*/ + secureportSET_PSPLIM( securecontextNO_STACK ); + secureportSET_PSP( securecontextNO_STACK ); + + /* Initialize all secure contexts. */ + for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ ) + { + xSecureContexts[ i ].pucCurrentStackPointer = NULL; + xSecureContexts[ i ].pucStackLimit = NULL; + xSecureContexts[ i ].pucStackStart = NULL; + xSecureContexts[ i ].pvTaskHandle = NULL; + } + + #if ( configENABLE_MPU == 1 ) + { + /* Configure thread mode to use PSP and to be unprivileged. */ + secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED ); + } + #else /* configENABLE_MPU */ + { + /* Configure thread mode to use PSP and to be privileged. */ + secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED ); + } + #endif /* configENABLE_MPU */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + uint32_t ulIsTaskPrivileged, + void * pvTaskHandle ) +#else /* configENABLE_MPU */ + secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + void * pvTaskHandle ) +#endif /* configENABLE_MPU */ +{ + uint8_t * pucStackMemory = NULL; + uint8_t * pucStackLimit; + uint32_t ulIPSR, ulSecureContextIndex; + SecureContextHandle_t xSecureContextHandle = securecontextINVALID_CONTEXT_ID; + + #if ( configENABLE_MPU == 1 ) + uint32_t * pulCurrentStackPointer = NULL; + #endif /* configENABLE_MPU */ + + /* Read the Interrupt Program Status Register (IPSR) and Process Stack Limit + * Register (PSPLIM) value. */ + secureportREAD_IPSR( ulIPSR ); + secureportREAD_PSPLIM( pucStackLimit ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. + * Also do nothing, if a secure context us already loaded. PSPLIM is set to + * securecontextNO_STACK when no secure context is loaded. */ + if( ( ulIPSR != 0 ) && ( pucStackLimit == securecontextNO_STACK ) ) + { + /* Ontain a free secure context. */ + ulSecureContextIndex = ulGetSecureContext( pvTaskHandle ); + + /* Were we able to get a free context? */ + if( ulSecureContextIndex < secureconfigMAX_SECURE_CONTEXTS ) + { + /* Allocate the stack space. */ + pucStackMemory = pvPortMalloc( ulSecureStackSize + securecontextSTACK_SEAL_SIZE ); + + if( pucStackMemory != NULL ) + { + /* Since stack grows down, the starting point will be the last + * location. Note that this location is next to the last + * allocated byte for stack (excluding the space for seal values) + * because the hardware decrements the stack pointer before + * writing i.e. if stack pointer is 0x2, a push operation will + * decrement the stack pointer to 0x1 and then write at 0x1. */ + xSecureContexts[ ulSecureContextIndex ].pucStackStart = pucStackMemory + ulSecureStackSize; + + /* Seal the created secure process stack. */ + *( uint32_t * )( pucStackMemory + ulSecureStackSize ) = securecontextSTACK_SEAL_VALUE; + *( uint32_t * )( pucStackMemory + ulSecureStackSize + 4 ) = securecontextSTACK_SEAL_VALUE; + + /* The stack cannot go beyond this location. This value is + * programmed in the PSPLIM register on context switch.*/ + xSecureContexts[ ulSecureContextIndex ].pucStackLimit = pucStackMemory; + + xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = pvTaskHandle; + + #if ( configENABLE_MPU == 1 ) + { + /* Store the correct CONTROL value for the task on the stack. 
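
/* A host-side sketch of the secure stack layout set up above: the usable stack
 * occupies [ pucStackMemory, pucStackStart ) and grows downwards from
 * pucStackStart, with the two seal words sitting immediately above it.
 * malloc() stands in for pvPortMalloc() on the secure heap, and the 256-byte
 * size is arbitrary. */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define STACK_SIZE    256U
#define SEAL_SIZE     8U
#define SEAL_VALUE    0xFEF5EDA5UL

int main( void )
{
    uint8_t * pucStackMemory = malloc( STACK_SIZE + SEAL_SIZE );

    assert( pucStackMemory != NULL );

    {
        uint8_t * pucStackStart = pucStackMemory + STACK_SIZE;   /* Initial secure PSP (non-MPU case). */
        uint8_t * pucStackLimit = pucStackMemory;                /* Programmed into PSPLIM. */

        /* Seal the top of the process stack, as recommended for ARMv8-M. */
        *( uint32_t * ) ( pucStackStart ) = SEAL_VALUE;
        *( uint32_t * ) ( pucStackStart + 4 ) = SEAL_VALUE;

        assert( ( pucStackStart - pucStackLimit ) == STACK_SIZE );
    }

    free( pucStackMemory );
    return 0;
}
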
+                     * This value is programmed in the CONTROL register on
+                     * context switch. */
+                    pulCurrentStackPointer = ( uint32_t * ) xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+                    pulCurrentStackPointer--;
+
+                    if( ulIsTaskPrivileged )
+                    {
+                        *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED;
+                    }
+                    else
+                    {
+                        *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED;
+                    }
+
+                    /* Store the current stack pointer. This value is programmed in
+                     * the PSP register on context switch. */
+                    xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer;
+                }
+                #else /* configENABLE_MPU */
+                {
+                    /* Current SP is set to the start of the stack. This value
+                     * is programmed in the PSP register on context switch. */
+                    xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = xSecureContexts[ ulSecureContextIndex ].pucStackStart;
+                }
+                #endif /* configENABLE_MPU */
+
+                /* Ensure to never return 0 as a valid context handle. */
+                xSecureContextHandle = ulSecureContextIndex + 1UL;
+            }
+        }
+    }
+
+    return xSecureContextHandle;
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+    uint32_t ulIPSR, ulSecureContextIndex;
+
+    /* Read the Interrupt Program Status Register (IPSR) value. */
+    secureportREAD_IPSR( ulIPSR );
+
+    /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+     * when the processor is running in the Thread Mode. */
+    if( ulIPSR != 0 )
+    {
+        /* Only free if a valid context handle is passed. */
+        if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+        {
+            ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+            /* Ensure that the secure context being deleted is associated with
+             * the task. */
+            if( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle )
+            {
+                /* Free the stack space. */
+                vPortFree( xSecureContexts[ ulSecureContextIndex ].pucStackLimit );
+
+                /* Return the secure context back to the free secure contexts pool. */
+                vReturnSecureContext( ulSecureContextIndex );
+            }
+        }
+    }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+    uint8_t * pucStackLimit;
+    uint32_t ulSecureContextIndex;
+
+    if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+    {
+        ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+        secureportREAD_PSPLIM( pucStackLimit );
+
+        /* Ensure that no secure context is loaded and the task is loading its
+         * own context. */
+        if( ( pucStackLimit == securecontextNO_STACK ) &&
+            ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) )
+        {
+            SecureContext_LoadContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) );
+        }
+    }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle )
+{
+    uint8_t * pucStackLimit;
+    uint32_t ulSecureContextIndex;
+
+    if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) )
+    {
+        ulSecureContextIndex = xSecureContextHandle - 1UL;
+
+        secureportREAD_PSPLIM( pucStackLimit );
+
+        /* Ensure that the task's context is loaded and the task is saving its
+         * own context. */
+        if( ( xSecureContexts[ ulSecureContextIndex ].pucStackLimit == pucStackLimit ) &&
+            ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) )
+        {
+            SecureContext_SaveContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) );
+        }
+    }
+}
+/*-----------------------------------------------------------*/
diff --git a/portable/GCC/ARM_CM35P/secure/secure_context.h b/portable/GCC/ARM_CM35P/secure/secure_context.h
new file mode 100644
index 00000000000..d0adbaf018f
--- /dev/null
+++ b/portable/GCC/ARM_CM35P/secure/secure_context.h
@@ -0,0 +1,135 @@
+/*
+ * FreeRTOS Kernel
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_CONTEXT_H__
+#define __SECURE_CONTEXT_H__
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* FreeRTOS includes. */
+#include "FreeRTOSConfig.h"
+
+/**
+ * @brief PSP value when no secure context is loaded.
+ */
+#define securecontextNO_STACK              0x0
+
+/**
+ * @brief Invalid context ID.
+ */
+#define securecontextINVALID_CONTEXT_ID    0UL
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Structure to represent a secure context.
+ *
+ * @note Since stack grows down, pucStackStart is the highest address while
+ * pucStackLimit is the first address of the allocated memory.
+ */
+typedef struct SecureContext
+{
+    uint8_t * pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */
+    uint8_t * pucStackLimit;          /**< Last location of the stack memory (PSPLIM). */
+    uint8_t * pucStackStart;          /**< First location of the stack memory. */
+    void * pvTaskHandle;              /**< Task handle of the task this context is associated with. */
+} SecureContext_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Opaque handle for a secure context.
+ */
+typedef uint32_t SecureContextHandle_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Initializes the secure context management system.
+ *
+ * PSP is set to NULL and therefore a task must allocate and load a context
+ * before calling any secure side function in the thread mode.
+ *
+ * @note This function must be called in the handler mode. It is no-op if called
+ * in the thread mode.
+ */
+void SecureContext_Init( void );
+
+/**
+ * @brief Allocates a context on the secure side.
+ * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] ulSecureStackSize Size of the stack to allocate on secure side. + * @param[in] ulIsTaskPrivileged 1 if the calling task is privileged, 0 otherwise. + * + * @return Opaque context handle if context is successfully allocated, NULL + * otherwise. + */ +#if ( configENABLE_MPU == 1 ) + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + uint32_t ulIsTaskPrivileged, + void * pvTaskHandle ); +#else /* configENABLE_MPU */ + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + void * pvTaskHandle ); +#endif /* configENABLE_MPU */ + +/** + * @brief Frees the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the + * context to be freed. + */ +void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Loads the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be loaded. + */ +void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Saves the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be saved. + */ +void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +#endif /* __SECURE_CONTEXT_H__ */ diff --git a/portable/GCC/ARM_CM35P/secure/secure_context_port.c b/portable/GCC/ARM_CM35P/secure/secure_context_port.c new file mode 100644 index 00000000000..13520870bca --- /dev/null +++ b/portable/GCC/ARM_CM35P/secure/secure_context_port.c @@ -0,0 +1,97 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Secure context includes. */ +#include "secure_context.h" + +/* Secure port macros. 
*/ +#include "secure_port_macros.h" + +void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ) __attribute__( ( naked ) ); +void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ) __attribute__( ( naked ) ); + +void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ) +{ + /* pxSecureContext value is in r0. */ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r1, ipsr \n" /* r1 = IPSR. */ + " cbz r1, load_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */ + " ldmia r0!, {r1, r2} \n" /* r1 = pxSecureContext->pucCurrentStackPointer, r2 = pxSecureContext->pucStackLimit. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldmia r1!, {r3} \n" /* Read CONTROL register value from task's stack. r3 = CONTROL. */ + " msr control, r3 \n" /* CONTROL = r3. */ + #endif /* configENABLE_MPU */ + " \n" + " msr psplim, r2 \n" /* PSPLIM = r2. */ + " msr psp, r1 \n" /* PSP = r1. */ + " \n" + " load_ctx_therad_mode: \n" + " bx lr \n" + " \n" + ::: "r0", "r1", "r2" + ); +} +/*-----------------------------------------------------------*/ + +void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ) +{ + /* pxSecureContext value is in r0. */ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r1, ipsr \n" /* r1 = IPSR. */ + " cbz r1, save_ctx_therad_mode \n" /* Do nothing if the processor is running in the Thread Mode. */ + " mrs r1, psp \n" /* r1 = PSP. */ + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " vstmdb r1!, {s0} \n" /* Trigger the deferred stacking of FPU registers. */ + " vldmia r1!, {s0} \n" /* Nullify the effect of the previous statement. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + #if ( configENABLE_MPU == 1 ) + " mrs r2, control \n" /* r2 = CONTROL. */ + " stmdb r1!, {r2} \n" /* Store CONTROL value on the stack. */ + #endif /* configENABLE_MPU */ + " \n" + " str r1, [r0] \n" /* Save the top of stack in context. pxSecureContext->pucCurrentStackPointer = r1. */ + " movs r1, %0 \n" /* r1 = securecontextNO_STACK. */ + " msr psplim, r1 \n" /* PSPLIM = securecontextNO_STACK. */ + " msr psp, r1 \n" /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */ + " \n" + " save_ctx_therad_mode: \n" + " bx lr \n" + " \n" + ::"i" ( securecontextNO_STACK ) : "r1", "memory" + ); +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P/secure/secure_heap.c b/portable/GCC/ARM_CM35P/secure/secure_heap.c new file mode 100644 index 00000000000..157fdbf0eec --- /dev/null +++ b/portable/GCC/ARM_CM35P/secure/secure_heap.c @@ -0,0 +1,454 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Secure context heap includes. */
+#include "secure_heap.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief Total heap size.
+ */
+#ifndef secureconfigTOTAL_HEAP_SIZE
+    #define secureconfigTOTAL_HEAP_SIZE    ( ( ( size_t ) ( 10 * 1024 ) ) )
+#endif
+
+/* No test marker by default. */
+#ifndef mtCOVERAGE_TEST_MARKER
+    #define mtCOVERAGE_TEST_MARKER()
+#endif
+
+/* No tracing by default. */
+#ifndef traceMALLOC
+    #define traceMALLOC( pvReturn, xWantedSize )
+#endif
+
+/* No tracing by default. */
+#ifndef traceFREE
+    #define traceFREE( pv, xBlockSize )
+#endif
+
+/* Block sizes must not get too small. */
+#define secureheapMINIMUM_BLOCK_SIZE    ( ( size_t ) ( xHeapStructSize << 1 ) )
+
+/* Assumes 8-bit bytes! */
+#define secureheapBITS_PER_BYTE         ( ( size_t ) 8 )
+/*-----------------------------------------------------------*/
+
+/* Allocate the memory for the heap. */
+#if ( configAPPLICATION_ALLOCATED_HEAP == 1 )
+
+/* The application writer has already defined the array used for the RTOS
+ * heap - probably so it can be placed in a special segment or address. */
+    extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
+#else /* configAPPLICATION_ALLOCATED_HEAP */
+    static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
+#endif /* configAPPLICATION_ALLOCATED_HEAP */
+
+/**
+ * @brief The linked list structure.
+ *
+ * This is used to link free blocks in order of their memory address.
+ */
+typedef struct A_BLOCK_LINK
+{
+    struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */
+    size_t xBlockSize;                     /**< The size of the free block. */
+} BlockLink_t;
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief Called automatically to set up the required heap structures the first
+ * time pvPortMalloc() is called.
+ */
+static void prvHeapInit( void );
+
+/**
+ * @brief Inserts a block of memory that is being freed into the correct
+ * position in the list of free memory blocks.
+ *
+ * The block being freed will be merged with the block in front of it and/or the
+ * block behind it if the memory blocks are adjacent to each other.
+ *
+ * @param[in] pxBlockToInsert The block being freed.
+ */
+static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert );
+/*-----------------------------------------------------------*/
+
+/**
+ * @brief The size of the structure placed at the beginning of each allocated
+ * memory block must be correctly byte aligned.
+ */
+static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( secureportBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+
+/**
+ * @brief Create a couple of list links to mark the start and end of the list.
+ */
+static BlockLink_t xStart;
+static BlockLink_t * pxEnd = NULL;
+
+/**
+ * @brief Keeps track of the number of free bytes remaining, but says nothing
+ * about fragmentation.
+ */
+static size_t xFreeBytesRemaining = 0U;
+static size_t xMinimumEverFreeBytesRemaining = 0U;
+
+/**
+ * @brief Gets set to the top bit of a size_t type.
+ *
+ * When this bit in the xBlockSize member of a BlockLink_t structure is set,
+ * then the block belongs to the application. When the bit is free, the block is
+ * still part of the free heap space.
+ */
+static size_t xBlockAllocatedBit = 0;
+/*-----------------------------------------------------------*/
+
+static void prvHeapInit( void )
+{
+    BlockLink_t * pxFirstFreeBlock;
+    uint8_t * pucAlignedHeap;
+    size_t uxAddress;
+    size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE;
+
+    /* Ensure the heap starts on a correctly aligned boundary. */
+    uxAddress = ( size_t ) ucHeap;
+
+    if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 )
+    {
+        uxAddress += ( secureportBYTE_ALIGNMENT - 1 );
+        uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+        xTotalHeapSize -= uxAddress - ( size_t ) ucHeap;
+    }
+
+    pucAlignedHeap = ( uint8_t * ) uxAddress;
+
+    /* xStart is used to hold a pointer to the first item in the list of free
+     * blocks. The void cast is used to prevent compiler warnings. */
+    xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap;
+    xStart.xBlockSize = ( size_t ) 0;
+
+    /* pxEnd is used to mark the end of the list of free blocks and is inserted
+     * at the end of the heap space. */
+    uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize;
+    uxAddress -= xHeapStructSize;
+    uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK );
+    pxEnd = ( void * ) uxAddress;
+    pxEnd->xBlockSize = 0;
+    pxEnd->pxNextFreeBlock = NULL;
+
+    /* To start with there is a single free block that is sized to take up the
+     * entire heap space, minus the space taken by pxEnd. */
+    pxFirstFreeBlock = ( void * ) pucAlignedHeap;
+    pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock;
+    pxFirstFreeBlock->pxNextFreeBlock = pxEnd;
+
+    /* Only one block exists - and it covers the entire usable heap space. */
+    xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
+    xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
+
+    /* Work out the position of the top bit in a size_t variable. */
+    xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 );
+}
+/*-----------------------------------------------------------*/
+
+static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert )
+{
+    BlockLink_t * pxIterator;
+    uint8_t * puc;
+
+    /* Iterate through the list until a block is found that has a higher address
+     * than the block being inserted. */
+    for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock )
+    {
+        /* Nothing to do here, just iterate to the right position. */
+    }
+
+    /* Do the block being inserted, and the block it is being inserted after,
+     * make a contiguous block of memory? */
+    puc = ( uint8_t * ) pxIterator;
+
+    if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
+    {
+        pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
+        pxBlockToInsert = pxIterator;
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    /* Do the block being inserted, and the block it is being inserted before,
+     * make a contiguous block of memory? */
+    puc = ( uint8_t * ) pxBlockToInsert;
+
+    if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock )
+    {
+        if( pxIterator->pxNextFreeBlock != pxEnd )
+        {
+            /* Form one big block from the two blocks. */
+            pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
+            pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
+        }
+        else
+        {
+            pxBlockToInsert->pxNextFreeBlock = pxEnd;
+        }
+    }
+    else
+    {
+        pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
+    }
+
+    /* If the block being inserted plugged a gap, and so was merged with the block
+     * before and the block after, then its pxNextFreeBlock pointer will have
+     * already been set, and should not be set here as that would make it point
+     * to itself. */
+    if( pxIterator != pxBlockToInsert )
+    {
+        pxIterator->pxNextFreeBlock = pxBlockToInsert;
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
+}
+/*-----------------------------------------------------------*/
+
+void * pvPortMalloc( size_t xWantedSize )
+{
+    BlockLink_t * pxBlock;
+    BlockLink_t * pxPreviousBlock;
+    BlockLink_t * pxNewBlockLink;
+    void * pvReturn = NULL;
+
+    /* If this is the first call to malloc then the heap will require
+     * initialisation to set up the list of free blocks. */
+    if( pxEnd == NULL )
+    {
+        prvHeapInit();
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    /* Check the requested block size is not so large that the top bit is set.
+     * The top bit of the block size member of the BlockLink_t structure is used
+     * to determine who owns the block - the application or the kernel, so it
+     * must be free. */
+    if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
+    {
+        /* The wanted size is increased so it can contain a BlockLink_t
+         * structure in addition to the requested amount of bytes. */
+        if( xWantedSize > 0 )
+        {
+            xWantedSize += xHeapStructSize;
+
+            /* Ensure that blocks are always aligned to the required number of
+             * bytes. */
+            if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 )
+            {
+                /* Byte alignment required. */
+                xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) );
+                secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 );
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+
+        if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
+        {
+            /* Traverse the list from the start (lowest address) block until
+             * one of adequate size is found. */
+            pxPreviousBlock = &xStart;
+            pxBlock = xStart.pxNextFreeBlock;
+
+            while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
+            {
+                pxPreviousBlock = pxBlock;
+                pxBlock = pxBlock->pxNextFreeBlock;
+            }
+
+            /* If the end marker was reached then a block of adequate size was
+             * not found. */
+            if( pxBlock != pxEnd )
+            {
+                /* Return the memory space pointed to - jumping over the
+                 * BlockLink_t structure at its start. */
+                pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize );
+
+                /* This block is being returned for use so must be taken out
+                 * of the list of free blocks. */
+                pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
+
+                /* If the block is larger than required it can be split into
+                 * two. */
+                if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE )
+                {
+                    /* This block is to be split into two. Create a new
+                     * block following the number of bytes requested. The void
+                     * cast is used to prevent byte alignment warnings from the
+                     * compiler. */
+                    pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
+                    secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 );
+
+                    /* Calculate the sizes of two blocks split from the single
+                     * block. */
+                    pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
+                    pxBlock->xBlockSize = xWantedSize;
+
+                    /* Insert the new block into the list of free blocks. */
+                    prvInsertBlockIntoFreeList( pxNewBlockLink );
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+
+                xFreeBytesRemaining -= pxBlock->xBlockSize;
+
+                if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
+                {
+                    xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
+                }
+                else
+                {
+                    mtCOVERAGE_TEST_MARKER();
+                }
+
+                /* The block is being returned - it is allocated and owned by
+                 * the application and has no "next" block. */
+                pxBlock->xBlockSize |= xBlockAllocatedBit;
+                pxBlock->pxNextFreeBlock = NULL;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+    else
+    {
+        mtCOVERAGE_TEST_MARKER();
+    }
+
+    traceMALLOC( pvReturn, xWantedSize );
+
+    #if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 )
+    {
+        if( pvReturn == NULL )
+        {
+            extern void vApplicationMallocFailedHook( void );
+            vApplicationMallocFailedHook();
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+    #endif /* if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) */
+
+    secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 );
+    return pvReturn;
+}
+/*-----------------------------------------------------------*/
+
+void vPortFree( void * pv )
+{
+    uint8_t * puc = ( uint8_t * ) pv;
+    BlockLink_t * pxLink;
+
+    if( pv != NULL )
+    {
+        /* The memory being freed will have a BlockLink_t structure immediately
+         * before it. */
+        puc -= xHeapStructSize;
+
+        /* This casting is to keep the compiler from issuing warnings. */
+        pxLink = ( void * ) puc;
+
+        /* Check the block is actually allocated. */
+        secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 );
+        secureportASSERT( pxLink->pxNextFreeBlock == NULL );
+
+        if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 )
+        {
+            if( pxLink->pxNextFreeBlock == NULL )
+            {
+                /* The block is being returned to the heap - it is no longer
+                 * allocated. */
+                pxLink->xBlockSize &= ~xBlockAllocatedBit;
+
+                secureportDISABLE_NON_SECURE_INTERRUPTS();
+                {
+                    /* Add this block to the list of free blocks. */
+                    xFreeBytesRemaining += pxLink->xBlockSize;
+                    traceFREE( pv, pxLink->xBlockSize );
+                    prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
+                }
+                secureportENABLE_NON_SECURE_INTERRUPTS();
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        else
+        {
+            mtCOVERAGE_TEST_MARKER();
+        }
+    }
+}
+/*-----------------------------------------------------------*/
+
+size_t xPortGetFreeHeapSize( void )
+{
+    return xFreeBytesRemaining;
+}
+/*-----------------------------------------------------------*/
+
+size_t xPortGetMinimumEverFreeHeapSize( void )
+{
+    return xMinimumEverFreeBytesRemaining;
+}
+/*-----------------------------------------------------------*/
diff --git a/portable/GCC/ARM_CM35P/secure/secure_heap.h b/portable/GCC/ARM_CM35P/secure/secure_heap.h
new file mode 100644
index 00000000000..c13590f86ad
--- /dev/null
+++ b/portable/GCC/ARM_CM35P/secure/secure_heap.h
@@ -0,0 +1,66 @@
+/*
+ * FreeRTOS Kernel
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+#ifndef __SECURE_HEAP_H__
+#define __SECURE_HEAP_H__
+
+/* Standard includes. */
+#include <stddef.h>
+
+/**
+ * @brief Allocates memory from the heap.
+ *
+ * @param[in] xWantedSize The size of the memory to be allocated.
+ *
+ * @return Pointer to the memory region if the allocation is successful, NULL
+ * otherwise.
+ */
+void * pvPortMalloc( size_t xWantedSize );
+
+/**
+ * @brief Frees the previously allocated memory.
+ *
+ * @param[in] pv Pointer to the memory to be freed.
+ */
+void vPortFree( void * pv );
+
+/**
+ * @brief Get the free heap size.
+ *
+ * @return Free heap size.
+ */
+size_t xPortGetFreeHeapSize( void );
+
+/**
+ * @brief Get the minimum ever free heap size.
+ *
+ * @return Minimum ever free heap size.
+ */
+size_t xPortGetMinimumEverFreeHeapSize( void );
+
+#endif /* __SECURE_HEAP_H__ */
diff --git a/portable/GCC/ARM_CM35P/secure/secure_init.c b/portable/GCC/ARM_CM35P/secure/secure_init.c
new file mode 100644
index 00000000000..dc19ebc7d5e
--- /dev/null
+++ b/portable/GCC/ARM_CM35P/secure/secure_init.c
@@ -0,0 +1,106 @@
+/*
+ * FreeRTOS Kernel
+ * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of
+ * this software and associated documentation files (the "Software"), to deal in
+ * the Software without restriction, including without limitation the rights to
+ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ * the Software, and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * https://www.FreeRTOS.org
+ * https://github.com/FreeRTOS
+ *
+ */
+
+/* Standard includes. */
+#include <stdint.h>
+
+/* Secure init includes. */
+#include "secure_init.h"
+
+/* Secure port macros. */
+#include "secure_port_macros.h"
+
+/**
+ * @brief Constants required to manipulate the SCB.
+ */
+#define secureinitSCB_AIRCR                 ( ( volatile uint32_t * ) 0xe000ed0c )  /* Application Interrupt and Reset Control Register. */
+#define secureinitSCB_AIRCR_VECTKEY_POS     ( 16UL )
+#define secureinitSCB_AIRCR_VECTKEY_MASK    ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS )
+#define secureinitSCB_AIRCR_PRIS_POS        ( 14UL )
+#define secureinitSCB_AIRCR_PRIS_MASK       ( 1UL << secureinitSCB_AIRCR_PRIS_POS )
+
+/**
+ * @brief Constants required to manipulate the FPU.
+ */
+#define secureinitFPCCR                     ( ( volatile uint32_t * ) 0xe000ef34 )  /* Floating Point Context Control Register. */
+#define secureinitFPCCR_LSPENS_POS          ( 29UL )
+#define secureinitFPCCR_LSPENS_MASK         ( 1UL << secureinitFPCCR_LSPENS_POS )
+#define secureinitFPCCR_TS_POS              ( 26UL )
+#define secureinitFPCCR_TS_MASK             ( 1UL << secureinitFPCCR_TS_POS )
+
+#define secureinitNSACR                     ( ( volatile uint32_t * ) 0xe000ed8c )  /* Non-secure Access Control Register. */
+#define secureinitNSACR_CP10_POS            ( 10UL )
+#define secureinitNSACR_CP10_MASK           ( 1UL << secureinitNSACR_CP10_POS )
+#define secureinitNSACR_CP11_POS            ( 11UL )
+#define secureinitNSACR_CP11_MASK           ( 1UL << secureinitNSACR_CP11_POS )
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void )
+{
+    uint32_t ulIPSR;
+
+    /* Read the Interrupt Program Status Register (IPSR) value. */
+    secureportREAD_IPSR( ulIPSR );
+
+    /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+     * when the processor is running in the Thread Mode. */
+    if( ulIPSR != 0 )
+    {
+        *( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) |
+                                   ( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) |
+                                   ( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK );
+    }
+}
+/*-----------------------------------------------------------*/
+
+secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void )
+{
+    uint32_t ulIPSR;
+
+    /* Read the Interrupt Program Status Register (IPSR) value. */
+    secureportREAD_IPSR( ulIPSR );
+
+    /* Do nothing if the processor is running in the Thread Mode. IPSR is zero
+     * when the processor is running in the Thread Mode. */
+    if( ulIPSR != 0 )
+    {
+        /* CP10 = 1 ==> Non-secure access to the Floating Point Unit is
+         * permitted. CP11 should be programmed to the same value as CP10. */
+        *( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK );
+
+        /* LSPENS = 0 ==> LSPEN is writable from non-secure state. This ensures
+         * that we can enable/disable lazy stacking in the port.c file. */
+        *( secureinitFPCCR ) &= ~( secureinitFPCCR_LSPENS_MASK );
+
+        /* TS = 1 ==> Treat FP registers as secure, i.e. callee-saved FP
+         * registers (S16-S31) are also pushed to stack on exception entry and
+         * restored on exception return.
*/ + *( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK ); + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P/secure/secure_init.h b/portable/GCC/ARM_CM35P/secure/secure_init.h new file mode 100644 index 00000000000..21daeda6b89 --- /dev/null +++ b/portable/GCC/ARM_CM35P/secure/secure_init.h @@ -0,0 +1,54 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_INIT_H__ +#define __SECURE_INIT_H__ + +/** + * @brief De-prioritizes the non-secure exceptions. + * + * This is needed to ensure that the non-secure PendSV runs at the lowest + * priority. Context switch is done in the non-secure PendSV handler. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_DePrioritizeNSExceptions( void ); + +/** + * @brief Sets up the Floating Point Unit (FPU) for Non-Secure access. + * + * Also sets FPCCR.TS=1 to ensure that the content of the Floating Point + * Registers are not leaked to the non-secure side. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_EnableNSFPUAccess( void ); + +#endif /* __SECURE_INIT_H__ */ diff --git a/portable/GCC/ARM_CM35P/secure/secure_port_macros.h b/portable/GCC/ARM_CM35P/secure/secure_port_macros.h new file mode 100644 index 00000000000..304913b8dbf --- /dev/null +++ b/portable/GCC/ARM_CM35P/secure/secure_port_macros.h @@ -0,0 +1,140 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_PORT_MACROS_H__ +#define __SECURE_PORT_MACROS_H__ + +/** + * @brief Byte alignment requirements. + */ +#define secureportBYTE_ALIGNMENT 8 +#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 ) + +/** + * @brief Macro to declare a function as non-secure callable. + */ +#if defined( __IAR_SYSTEMS_ICC__ ) + #define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry __root +#else + #define secureportNON_SECURE_CALLABLE __attribute__( ( cmse_nonsecure_entry ) ) __attribute__( ( used ) ) +#endif + +/** + * @brief Set the secure PRIMASK value. + */ +#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Set the non-secure PRIMASK value. + */ +#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Read the PSP value in the given variable. + */ +#define secureportREAD_PSP( pucOutCurrentStackPointer ) \ + __asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) ) + +/** + * @brief Set the PSP to the given value. + */ +#define secureportSET_PSP( pucCurrentStackPointer ) \ + __asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) ) + +/** + * @brief Read the PSPLIM value in the given variable. + */ +#define secureportREAD_PSPLIM( pucOutStackLimit ) \ + __asm volatile ( "mrs %0, psplim" : "=r" ( pucOutStackLimit ) ) + +/** + * @brief Set the PSPLIM to the given value. + */ +#define secureportSET_PSPLIM( pucStackLimit ) \ + __asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) ) + +/** + * @brief Set the NonSecure MSP to the given value. + */ +#define secureportSET_MSP_NS( pucMainStackPointer ) \ + __asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) ) + +/** + * @brief Set the CONTROL register to the given value. + */ +#define secureportSET_CONTROL( ulControl ) \ + __asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" ) + +/** + * @brief Read the Interrupt Program Status Register (IPSR) value in the given + * variable. + */ +#define secureportREAD_IPSR( ulIPSR ) \ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) ) + +/** + * @brief PRIMASK value to enable interrupts. + */ +#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0 + +/** + * @brief PRIMASK value to disable interrupts. + */ +#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1 + +/** + * @brief Disable secure interrupts. + */ +#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Disable non-secure interrupts. + * + * This effectively disables context switches. + */ +#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Enable non-secure interrupts. 
+ */ +#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL ) + +/** + * @brief Assert definition. + */ +#define secureportASSERT( x ) \ + if( ( x ) == 0 ) \ + { \ + secureportDISABLE_SECURE_INTERRUPTS(); \ + secureportDISABLE_NON_SECURE_INTERRUPTS(); \ + for( ; ; ) {; } \ + } + +#endif /* __SECURE_PORT_MACROS_H__ */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c new file mode 100644 index 00000000000..9976daee49a --- /dev/null +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c @@ -0,0 +1,1261 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/* Portasm includes. */ +#include "portasm.h" + +#if ( configENABLE_TRUSTZONE == 1 ) + /* Secure components includes. */ + #include "secure_context.h" + #include "secure_init.h" +#endif /* configENABLE_TRUSTZONE */ + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/** + * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only + * i.e. the processor boots as secure and never jumps to the non-secure side. + * The Trust Zone support in the port must be disabled in order to run FreeRTOS + * on the secure side. The following are the valid configuration seetings: + * + * 1. Run FreeRTOS on the Secure Side: + * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0 + * + * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1 + * + * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0 + */ +#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) ) + #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side. 
+#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the NVIC. + */ +#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) +#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the SCB. + */ +#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 ) +#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the FPU. + */ +#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */ +#define portCPACR_CP10_VALUE ( 3UL ) +#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE +#define portCPACR_CP10_POS ( 20UL ) +#define portCPACR_CP11_POS ( 22UL ) + +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. + */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. 
*/ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 94UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. + */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. 
+ * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. + */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. + */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. 
+ */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. + */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be unsuspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Re-enable interrupts - see comments above the cpsid instruction + * above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. 
*/ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. 
*/ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ + vTaskStepTick( ulCompleteTickPeriods ); + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. 
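The configPRE_SLEEP_PROCESSING() and configPOST_SLEEP_PROCESSING() macros invoked by vPortSuppressTicksAndSleep() above are application hooks. A minimal sketch of how they might be wired up, not part of this patch; the hook function names are assumptions:

    /* The macro definitions would normally live in FreeRTOSConfig.h, with the
     * prototypes visible wherever they expand; the hook names are placeholders. */
    #include "FreeRTOS.h"

    void vHypotheticalPreSleep( TickType_t * pxExpectedIdleTime );
    void vHypotheticalPostSleep( TickType_t xExpectedIdleTime );

    #define configPRE_SLEEP_PROCESSING( x )     vHypotheticalPreSleep( &( x ) )
    #define configPOST_SLEEP_PROCESSING( x )    vHypotheticalPostSleep( x )

    void vHypotheticalPreSleep( TickType_t * pxExpectedIdleTime )
    {
        /* For example, gate unused peripheral clocks here. Writing 0 through the
         * pointer tells the port that this hook performed its own sleep, so the
         * wfi in vPortSuppressTicksAndSleep() is skipped. */
        ( void ) pxExpectedIdleTime;
    }

    void vHypotheticalPostSleep( TickType_t xExpectedIdleTime )
    {
        /* For example, restore the peripheral clocks after waking. */
        ( void ) xExpectedIdleTime;
    }
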
*/ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. 
*/ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. */ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. */ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. 
*/ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. 
*/ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. */ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. 
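When configENABLE_TRUSTZONE is 1, the portSVC_ALLOCATE_SECURE_CONTEXT case handled above is reached through the portALLOCATE_SECURE_CONTEXT() macro. A minimal task-side sketch, not part of this patch; the task name, the secure stack size and the commented-out secure call are assumptions:

    #include "FreeRTOS.h"
    #include "task.h"

    static void prvHypotheticalSecureCallingTask( void * pvParameters )
    {
        ( void ) pvParameters;

        /* Raises SVC portSVC_ALLOCATE_SECURE_CONTEXT so that vPortSVCHandler_C()
         * allocates and loads a secure context for this task. The stack size is
         * a placeholder. */
        portALLOCATE_SECURE_CONTEXT( 256 );

        for( ; ; )
        {
            /* Calls into the secure image can be made from here, e.g. a
             * hypothetical NSCHypotheticalSecureFunction(); */
        }
    }
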
*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. 
*/ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. */ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. */ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. 
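For reference, a minimal sketch (not part of this patch) of the application-side xRegions that reach this translation via vTaskAllocateMPURegions() or the xRegions member of TaskParameters_t; the buffer, its size and the task handle are assumptions:

    #include "FreeRTOS.h"
    #include "task.h"

    /* Kept 32-byte aligned so the region limits line up with the RBAR/RLAR
     * address masks used above. Buffer and size are placeholders. */
    static uint8_t ucHypotheticalBuffer[ 512 ] __attribute__( ( aligned( 32 ) ) );

    /* Three configurable regions per task when configTOTAL_MPU_REGIONS is 8;
     * zeroed entries are invalidated by this function. Read-write access is
     * implied because tskMPU_REGION_READ_ONLY is not set, and normal memory
     * because tskMPU_REGION_DEVICE_MEMORY is not set. */
    static const MemoryRegion_t xHypotheticalRegions[ 3 ] =
    {
        { ucHypotheticalBuffer, 512, tskMPU_REGION_EXECUTE_NEVER },
        { NULL,                 0,   0                           },
        { NULL,                 0,   0                           }
    };

    /* Granted to an already created task (placeholder handle), for example:
     * vTaskAllocateMPURegions( xHypotheticalTaskHandle, xHypotheticalRegions ); */
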
*/ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c new file mode 100644 index 00000000000..a78529d04d9 --- /dev/null +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c @@ -0,0 +1,365 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE ensures that PRIVILEGED_FUNCTION + * is defined correctly and privileged functions are placed in correct sections. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Portasm includes. */ +#include "portasm.h" + +/* MPU_WRAPPERS_INCLUDED_FROM_API_FILE is needed to be defined only for the + * header files. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ + " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + " str r4, [r2] \n"/* Disable MPU. */ + " \n" + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r3, [r2] \n"/* Program MAIR0. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #4 \n"/* r3 = 4. */ + " str r3, [r2] \n"/* Program RNR = 4. */ + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #8 \n"/* r3 = 8. */ + " str r3, [r2] \n"/* Program RNR = 8. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #12 \n"/* r3 = 12. */ + " str r3, [r2] \n"/* Program RNR = 12. */ + " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r2] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. 
*/ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " msr control, r2 \n"/* Set this task's CONTROL value. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n"/* Finally, branch to EXC_RETURN. */ + #else /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r2 \n"/* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ + " \n" + " .align 4 \n" + "pxCurrentTCBConst2: .word pxCurrentTCB \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst2: .word 0xe000ed94 \n" + "xMAIR0Const2: .word 0xe000edc0 \n" + "xRNRConst2: .word 0xe000ed98 \n" + "xRBARConst2: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* r0 = CONTROL. */ + " tst r0, #1 \n"/* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + " ite ne \n" + " movne r0, #0 \n"/* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + " moveq r0, #1 \n"/* CONTROL[0]==0. Return true to indicate that the processor is privileged. */ + " bx lr \n"/* Return. */ + " \n" + " .align 4 \n" + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vRaisePrivilege( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* Read the CONTROL register. */ + " bic r0, #1 \n"/* Clear the bit 0. */ + " msr control, r0 \n"/* Write back the new CONTROL value. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vResetPrivilege( void ) /* __attribute__ (( naked )) */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, control \n"/* r0 = CONTROL. */ + " orr r0, #1 \n"/* r0 = r0 | 1. */ + " msr control, r0 \n"/* CONTROL = r0. */ + " bx lr \n"/* Return to the caller. */ + ::: "r0", "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vStartFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r0, xVTORConst \n"/* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n"/* Read the VTOR register which gives the address of vector table. */ + " ldr r0, [r0] \n"/* The first entry in vector table is stack pointer. */ + " msr msp, r0 \n"/* Set the MSP back to the start of the stack. */ + " cpsie i \n"/* Globally enable interrupts. 
*/ + " cpsie f \n" + " dsb \n" + " isb \n" + " svc %0 \n"/* System call to start the first task. */ + " nop \n" + " \n" + " .align 4 \n" + "xVTORConst: .word 0xe000ed08 \n" + ::"i" ( portSVC_START_SCHEDULER ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, basepri \n"/* r0 = basepri. Return original basepri value. */ + " mov r1, %0 \n"/* r1 = configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " msr basepri, r1 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " msr basepri, r0 \n"/* basepri = ulMask. */ + " dsb \n" + " isb \n" + " bx lr \n"/* Return. */ + ::: "memory" + ); +} +/*-----------------------------------------------------------*/ + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " mrs r0, psp \n"/* Read PSP in r0. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + #if ( configENABLE_MPU == 1 ) + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mrs r2, control \n"/* r2 = CONTROL. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ + #else /* configENABLE_MPU */ + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ + #endif /* configENABLE_MPU */ + " \n" + " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ + " str r0, [r1] \n"/* Save the new top of stack in TCB. */ + " \n" + " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n"/* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n"/* r0 = 0. */ + " msr basepri, r0 \n"/* Enable interrupts. */ + " \n" + " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ + " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ + " \n" + #if ( configENABLE_MPU == 1 ) + " dmb \n"/* Complete outstanding transfers before disabling MPU. */ + " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + " str r4, [r2] \n"/* Disable MPU. */ + " \n" + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. 
*/ + " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r3, [r2] \n"/* Program MAIR0. */ + " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #4 \n"/* r3 = 4. */ + " str r3, [r2] \n"/* Program RNR = 4. */ + " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #8 \n"/* r3 = 8. */ + " str r3, [r2] \n"/* Program RNR = 8. */ + " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ + " movs r3, #12 \n"/* r3 = 12. */ + " str r3, [r2] \n"/* Program RNR = 12. */ + " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ + " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ + " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + " str r4, [r2] \n"/* Enable MPU. */ + " dsb \n"/* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( configENABLE_MPU == 1 ) + " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ + #else /* configENABLE_MPU */ + " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ + #endif /* configENABLE_MPU */ + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + " it eq \n" + " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + #if ( configENABLE_MPU == 1 ) + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ + #else /* configENABLE_MPU */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ + #endif /* configENABLE_MPU */ + " msr psp, r0 \n"/* Remember the new top of stack for the task. 
*/ + " bx r3 \n" + " \n" + " .align 4 \n" + "pxCurrentTCBConst: .word pxCurrentTCB \n" + #if ( configENABLE_MPU == 1 ) + "xMPUCTRLConst: .word 0xe000ed94 \n" + "xMAIR0Const: .word 0xe000edc0 \n" + "xRNRConst: .word 0xe000ed98 \n" + "xRBARConst: .word 0xe000ed9c \n" + #endif /* configENABLE_MPU */ + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} +/*-----------------------------------------------------------*/ + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " tst lr, #4 \n" + " ite eq \n" + " mrseq r0, msp \n" + " mrsne r0, psp \n" + " ldr r1, svchandler_address_const \n" + " bx r1 \n" + " \n" + " .align 4 \n" + "svchandler_address_const: .word vPortSVCHandler_C \n" + ); +} +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.h new file mode 100644 index 00000000000..ecd86b97fd1 --- /dev/null +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. 
+ * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. + * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h new file mode 100644 index 00000000000..33bfb283461 --- /dev/null +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h @@ -0,0 +1,67 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +/** + * Architecture specifics. 
+ */ +#define portARCH_NAME "Cortex-M35P" +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..ca7e9225c05 --- /dev/null +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h @@ -0,0 +1,313 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. 
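The three build options checked by the #error directives above must be defined by the application. A minimal FreeRTOSConfig.h sketch, not part of this patch, with values assumed for one possible build:

    #define configENABLE_FPU          1 /* Save and restore floating point context. */
    #define configENABLE_MPU          0 /* No per-task memory protection in this assumed build. */
    #define configENABLE_TRUSTZONE    0 /* Typically 0 when using the _NTZ (no TrustZone) port files. */
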
+ */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. 
+ * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. 
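+ *
+ * portYIELD_FROM_ISR() defined above is normally the last statement of an
+ * interrupt handler and requests a context switch only if a FromISR API call
+ * woke a higher priority task. A minimal sketch; the queue handle and the
+ * handler name are hypothetical, not part of this port:
+ * @code{c}
+ * extern QueueHandle_t xExampleQueue;
+ *
+ * void vExampleIRQHandler( void )
+ * {
+ *     BaseType_t xHigherPriorityTaskWoken = pdFALSE;
+ *     uint32_t ulValue = 0;
+ *
+ *     // The FromISR call records whether a higher priority task was unblocked.
+ *     xQueueSendFromISR( xExampleQueue, &ulValue, &xHigherPriorityTaskWoken );
+ *
+ *     // Pend PendSV (via portEND_SWITCHING_ISR) only if a switch is required.
+ *     portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
+ * }
+ * @endcode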
+ */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. 
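+ *
+ * A task that calls secure-side functions must allocate its secure context
+ * first, typically as the first statement of the task function. A sketch only;
+ * the secure stack size and the secure gateway function are application
+ * specific and shown here as assumptions:
+ * @code{c}
+ * extern void NSCFunctionExportedBySecureSide( void );
+ *
+ * void vSecureCallingTask( void * pvParameters )
+ * {
+ *     // Allocate a secure context before the first secure call is made.
+ *     portALLOCATE_SECURE_CONTEXT( 128 );
+ *
+ *     for( ;; )
+ *     {
+ *         NSCFunctionExportedBySecureSide();
+ *         vTaskDelay( pdMS_TO_TICKS( 1000 ) );
+ *     }
+ * }
+ * @endcode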
+ */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacro.h b/portable/GCC/ARM_CM55/non_secure/portmacro.h index 35c160f3407..adb47d8420f 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacro.h @@ -55,7 +55,7 @@ */ #define portARCH_NAME "Cortex-M55" #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h index 35c160f3407..adb47d8420f 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h @@ -55,7 +55,7 @@ */ #define portARCH_NAME "Cortex-M55" #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM85/non_secure/portmacro.h b/portable/GCC/ARM_CM85/non_secure/portmacro.h index 69d32d7f57a..fec6923394c 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacro.h @@ -55,7 +55,7 @@ */ #define portARCH_NAME "Cortex-M85" #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h index 69d32d7f57a..fec6923394c 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h @@ -55,7 +55,7 @@ */ #define portARCH_NAME "Cortex-M85" #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /** diff --git a/portable/IAR/ARM_CM35P/non_secure/port.c b/portable/IAR/ARM_CM35P/non_secure/port.c new file mode 100644 index 00000000000..9976daee49a --- /dev/null +++ b/portable/IAR/ARM_CM35P/non_secure/port.c @@ -0,0 +1,1261 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/* Portasm includes. */ +#include "portasm.h" + +#if ( configENABLE_TRUSTZONE == 1 ) + /* Secure components includes. */ + #include "secure_context.h" + #include "secure_init.h" +#endif /* configENABLE_TRUSTZONE */ + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/** + * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only + * i.e. the processor boots as secure and never jumps to the non-secure side. + * The Trust Zone support in the port must be disabled in order to run FreeRTOS + * on the secure side. The following are the valid configuration seetings: + * + * 1. Run FreeRTOS on the Secure Side: + * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0 + * + * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1 + * + * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0 + */ +#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) ) + #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the NVIC. + */ +#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) +#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the SCB. + */ +#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 ) +#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the FPU. + */ +#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. 
*/ +#define portCPACR_CP10_VALUE ( 3UL ) +#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE +#define portCPACR_CP10_POS ( 20UL ) +#define portCPACR_CP11_POS ( 22UL ) + +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. + */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. 
+ */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 94UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. + */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. + * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. 
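+ *
+ * The configSYSTICK_CLOCK_HZ override described above is only needed when the
+ * SysTick is driven by a reference clock that is slower than the core. A
+ * FreeRTOSConfig.h sketch only; the 32768 Hz value is an assumed reference
+ * clock frequency, not a requirement of this port:
+ * @code{c}
+ * // Only when SysTick does not run from the core clock.
+ * #define configSYSTICK_CLOCK_HZ    ( 32768UL )
+ * @endcode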
+ */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. + */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. + */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. + */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. + */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. + */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. 
*/ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be unsuspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Re-enable interrupts - see comments above the cpsid instruction + * above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. */ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. 
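+ *
+ * The configPRE_SLEEP_PROCESSING() and configPOST_SLEEP_PROCESSING() macros
+ * used above default to nothing. A FreeRTOSConfig.h sketch in which the
+ * clock-gating functions are hypothetical placeholders (a pre-sleep hook may
+ * also set its parameter to 0 to signal that it executed its own wfi):
+ * @code{c}
+ * #define configUSE_TICKLESS_IDLE             1
+ * #define configPRE_SLEEP_PROCESSING( x )     vStopApplicationClocks()
+ * #define configPOST_SLEEP_PROCESSING( x )    vRestartApplicationClocks()
+ * @endcode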
*/ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. */ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. 
This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. */ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ + vTaskStepTick( ulCompleteTickPeriods ); + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. */ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. 
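+ *
+ * As noted above, a task function must never return. A correctly formed task
+ * either runs forever or deletes itself explicitly; a minimal sketch:
+ * @code{c}
+ * void vOneShotTask( void * pvParameters )
+ * {
+ *     // Perform the one-off work here.
+ *
+ *     // Delete this task rather than returning from its function.
+ *     vTaskDelete( NULL );
+ * }
+ * @endcode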
*/ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. */ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. 
*/ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. */ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. */ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. 
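+ *
+ * vPortEnterCritical() and vPortExitCritical() defined above are normally
+ * reached through the taskENTER_CRITICAL()/taskEXIT_CRITICAL() macros, which
+ * nest safely; a usage sketch with a hypothetical shared counter:
+ * @code{c}
+ * static volatile uint32_t ulSharedCounter = 0;
+ *
+ * void vIncrementSharedCounter( void )
+ * {
+ *     taskENTER_CRITICAL();
+ *     {
+ *         ulSharedCounter++;
+ *     }
+ *     taskEXIT_CRITICAL();
+ * }
+ * @endcode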
*/ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. 
*/ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. 
*/ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. */ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. 
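+ *
+ * xPortStartScheduler() above is not called directly by the application; it is
+ * reached through vTaskStartScheduler(), as in this minimal sketch (the task
+ * function, name and priority are illustrative assumptions):
+ * @code{c}
+ * extern void vAnApplicationTask( void * pvParameters );
+ *
+ * int main( void )
+ * {
+ *     xTaskCreate( vAnApplicationTask, "App", configMINIMAL_STACK_SIZE, NULL, tskIDLE_PRIORITY + 1, NULL );
+ *
+ *     // Does not return unless there was insufficient heap for the idle task.
+ *     vTaskStartScheduler();
+ *
+ *     for( ;; )
+ *     {
+ *     }
+ * }
+ * @endcode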
*/ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. */ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. 
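+ *
+ * The xRegions translation above consumes the MemoryRegion_t entries supplied
+ * through xTaskCreateRestricted(). A sketch only; the shared buffer, its
+ * 32-byte aligned size and the task function are placeholders, not part of
+ * this port:
+ * @code{c}
+ * extern void vAnApplicationTask( void * pvParameters );
+ * extern uint8_t ucSharedBuffer[ 32 ];
+ * static StackType_t xRestrictedTaskStack[ 256 ] __attribute__( ( aligned( 32 ) ) );
+ *
+ * static const TaskParameters_t xRestrictedTaskParameters =
+ * {
+ *     .pvTaskCode     = vAnApplicationTask,
+ *     .pcName         = "Restricted",
+ *     .usStackDepth   = 256,
+ *     .pvParameters   = NULL,
+ *     .uxPriority     = tskIDLE_PRIORITY + 1,
+ *     .puxStackBuffer = xRestrictedTaskStack,
+ *     .xRegions       =
+ *     {
+ *         { ucSharedBuffer, 32, tskMPU_REGION_READ_WRITE },
+ *         { 0, 0, 0 },
+ *         { 0, 0, 0 }
+ *     }
+ * };
+ *
+ * void vCreateRestrictedTask( void )
+ * {
+ *     // Call from a privileged context, e.g. before the scheduler is started.
+ *     xTaskCreateRestricted( &xRestrictedTaskParameters, NULL );
+ * }
+ * @endcode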
*/ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P/non_secure/portasm.h b/portable/IAR/ARM_CM35P/non_secure/portasm.h new file mode 100644 index 00000000000..ecd86b97fd1 --- /dev/null +++ b/portable/IAR/ARM_CM35P/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. 
+ * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. + * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/IAR/ARM_CM35P/non_secure/portasm.s b/portable/IAR/ARM_CM35P/non_secure/portasm.s new file mode 100644 index 00000000000..a193cd7b80e --- /dev/null +++ b/portable/IAR/ARM_CM35P/non_secure/portasm.s @@ -0,0 +1,353 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ +/* Including FreeRTOSConfig.h here will cause build errors if the header file +contains code not understood by the assembler - for example the 'extern' keyword. +To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so +the code is included in C files but excluded by the preprocessor in assembly +files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. 
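+
+For example, a FreeRTOSConfig.h shared between C and assembly could guard its
+C-only declarations as below (an illustrative sketch - the SystemCoreClock
+symbol is only an assumed example, not something this port requires):
+
+    #ifdef __ICCARM__
+        #include <stdint.h>
+        extern uint32_t SystemCoreClock;
+    #endif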
*/ +#include "FreeRTOSConfig.h" + + EXTERN pxCurrentTCB + EXTERN xSecureContext + EXTERN vTaskSwitchContext + EXTERN vPortSVCHandler_C + EXTERN SecureContext_SaveContext + EXTERN SecureContext_LoadContext + + PUBLIC xIsPrivileged + PUBLIC vResetPrivilege + PUBLIC vPortAllocateSecureContext + PUBLIC vRestoreContextOfFirstTask + PUBLIC vRaisePrivilege + PUBLIC vStartFirstTask + PUBLIC ulSetInterruptMask + PUBLIC vClearInterruptMask + PUBLIC PendSV_Handler + PUBLIC SVC_Handler + PUBLIC vPortFreeSecureContext +/*-----------------------------------------------------------*/ + +/*---------------- Unprivileged Functions -------------------*/ + +/*-----------------------------------------------------------*/ + + SECTION .text:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +xIsPrivileged: + mrs r0, control /* r0 = CONTROL. */ + tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + ite ne + movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is not privileged. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vResetPrivilege: + mrs r0, control /* r0 = CONTROL. */ + orr r0, r0, #1 /* r0 = r0 | 1. */ + msr control, r0 /* CONTROL = r0. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +vPortAllocateSecureContext: + svc 0 /* Secure context is allocated in the supervisor call. portSVC_ALLOCATE_SECURE_CONTEXT = 0. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +/*----------------- Privileged Functions --------------------*/ + +/*-----------------------------------------------------------*/ + + SECTION privileged_functions:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +vRestoreContextOfFirstTask: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r3, [r2] /* Read pxCurrentTCB. */ + ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + +#if ( configENABLE_MPU == 1 ) + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r2] /* Disable MPU. */ + + adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ + ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r4, [r2] /* Program MAIR0. */ + ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ + movs r4, #4 /* r4 = 4. */ + str r4, [r2] /* Program RNR = 4. */ + adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r2] /* Enable MPU. */ + dsb /* Force memory writes before continuing. 
*/ +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ + ldr r5, =xSecureContext + str r1, [r5] /* Set xSecureContext to this task's value for the same. */ + msr psplim, r2 /* Set this task's PSPLIM value. */ + msr control, r3 /* Set this task's CONTROL value. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx r4 /* Finally, branch to EXC_RETURN. */ +#else /* configENABLE_MPU */ + ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + ldr r4, =xSecureContext + str r1, [r4] /* Set xSecureContext to this task's value for the same. */ + msr psplim, r2 /* Set this task's PSPLIM value. */ + movs r1, #2 /* r1 = 2. */ + msr CONTROL, r1 /* Switch to use PSP in the thread mode. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx r3 /* Finally, branch to EXC_RETURN. */ +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +vRaisePrivilege: + mrs r0, control /* Read the CONTROL register. */ + bic r0, r0, #1 /* Clear the bit 0. */ + msr control, r0 /* Write back the new CONTROL value. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +vStartFirstTask: + ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */ + ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */ + ldr r0, [r0] /* The first entry in vector table is stack pointer. */ + msr msp, r0 /* Set the MSP back to the start of the stack. */ + cpsie i /* Globally enable interrupts. */ + cpsie f + dsb + isb + svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */ +/*-----------------------------------------------------------*/ + +ulSetInterruptMask: + mrs r0, basepri /* r0 = basepri. Return original basepri value. */ + mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r1 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vClearInterruptMask: + msr basepri, r0 /* basepri = ulMask. */ + dsb + isb + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + mrs r2, psp /* Read PSP in r2. */ + + cbz r0, save_ns_context /* No secure context to save. */ + push {r0-r2, r14} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r3} /* LR is now in r3. */ + mov lr, r3 /* LR = r3. */ + lsls r1, r3, #25 /* r1 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. 
*/ + bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ +#if ( configENABLE_MPU == 1 ) + subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mrs r3, control /* r3 = CONTROL. */ + mov r4, lr /* r4 = LR/EXC_RETURN. */ + stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ +#else /* configENABLE_MPU */ + subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mov r3, lr /* r3 = LR/EXC_RETURN. */ + stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ +#endif /* configENABLE_MPU */ + b select_next_task + + save_ns_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + it eq + vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + #if ( configENABLE_MPU == 1 ) + subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + adds r2, r2, #16 /* r2 = r2 + 16. */ + stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mrs r3, control /* r3 = CONTROL. */ + mov r4, lr /* r4 = LR/EXC_RETURN. */ + subs r2, r2, #16 /* r2 = r2 - 16. */ + stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ + #else /* configENABLE_MPU */ + subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + str r2, [r1] /* Save the new top of stack in TCB. */ + adds r2, r2, #12 /* r2 = r2 + 12. */ + stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ + mrs r1, psplim /* r1 = PSPLIM. */ + mov r3, lr /* r3 = LR/EXC_RETURN. */ + subs r2, r2, #12 /* r2 = r2 - 12. */ + stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ + #endif /* configENABLE_MPU */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ + + #if ( configENABLE_MPU == 1 ) + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r3] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r3] /* Disable MPU. */ + + adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + ldr r4, [r1] /* r4 = *r1 i.e. 
r4 = MAIR0. */ + ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ + str r4, [r3] /* Program MAIR0. */ + ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */ + movs r4, #4 /* r4 = 4. */ + str r4, [r3] /* Program RNR = 4. */ + adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ + ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r3] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r3] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + #endif /* configENABLE_MPU */ + + #if ( configENABLE_MPU == 1 ) + ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ + msr psplim, r1 /* Restore the PSPLIM register value for the task. */ + msr control, r3 /* Restore the CONTROL register value for the task. */ + mov lr, r4 /* LR = r4. */ + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r3] /* Restore the task's xSecureContext. */ + cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + push {r2, r4} + bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r2, r4} + mov lr, r4 /* LR = r4. */ + lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + msr psp, r2 /* Remember the new top of stack for the task. */ + bx lr + #else /* configENABLE_MPU */ + ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + msr psplim, r1 /* Restore the PSPLIM register value for the task. */ + mov lr, r4 /* LR = r4. */ + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r3] /* Restore the task's xSecureContext. */ + cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB. */ + push {r2, r4} + bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r2, r4} + mov lr, r4 /* LR = r4. */ + lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + msr psp, r2 /* Remember the new top of stack for the task. */ + bx lr + #endif /* configENABLE_MPU */ + + restore_ns_context: + ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. 
Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + it eq + vldmiaeq r2!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + msr psp, r2 /* Remember the new top of stack for the task. */ + bx lr +/*-----------------------------------------------------------*/ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C +/*-----------------------------------------------------------*/ + +vPortFreeSecureContext: + /* r0 = uint32_t *pulTCB. */ + ldr r2, [r0] /* The first item in the TCB is the top of the stack. */ + ldr r1, [r2] /* The first item on the stack is the task's xSecureContext. */ + cmp r1, #0 /* Raise svc if task's xSecureContext is not NULL. */ + it ne + svcne 1 /* Secure context is freed in the supervisor call. portSVC_FREE_SECURE_CONTEXT = 1. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + + END diff --git a/portable/IAR/ARM_CM35P/non_secure/portmacro.h b/portable/IAR/ARM_CM35P/non_secure/portmacro.h new file mode 100644 index 00000000000..a0efc1f9dcf --- /dev/null +++ b/portable/IAR/ARM_CM35P/non_secure/portmacro.h @@ -0,0 +1,78 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M35P" +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +#if( configTOTAL_MPU_REGIONS == 16 ) + #error 16 MPU regions are not yet supported for this port. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. 
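+ *
+ * These macros are normally reached through the generic FreeRTOS wrappers
+ * rather than being called directly. An illustrative application-side use
+ * (ulSharedCounter is an assumed application variable):
+ * @code{c}
+ * taskENTER_CRITICAL();
+ * {
+ *     ulSharedCounter++;
+ * }
+ * taskEXIT_CRITICAL();
+ * @endcode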
+ */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in + * the source code because to do so would cause other compilers to generate + * warnings. */ +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..ca7e9225c05 --- /dev/null +++ b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h @@ -0,0 +1,313 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. 
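+ *
+ * A minimal illustration of how the types defined below are used from
+ * application code (the 500 ms delay is an arbitrary example value):
+ * @code{c}
+ * const TickType_t xDelay = pdMS_TO_TICKS( 500 );
+ * vTaskDelay( xDelay );
+ * @endcode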
+ */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. + */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. 
+ * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. 
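+ *
+ * These expand to the standard task function signature, so an illustrative
+ * task (vAnExampleTask is an assumed name) looks like:
+ * @code{c}
+ * void vAnExampleTask( void * pvParameters )
+ * {
+ *     ( void ) pvParameters;
+ *
+ *     for( ;; )
+ *     {
+ *         // Task code goes here - a task must never return.
+ *     }
+ * }
+ * @endcode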
+ */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM35P/secure/secure_context.c b/portable/IAR/ARM_CM35P/secure/secure_context.c new file mode 100644 index 00000000000..0730d574dd0 --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_context.c @@ -0,0 +1,351 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Secure context includes. */ +#include "secure_context.h" + +/* Secure heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief CONTROL value for privileged tasks. + * + * Bit[0] - 0 --> Thread mode is privileged. + * Bit[1] - 1 --> Thread mode uses PSP. + */ +#define securecontextCONTROL_VALUE_PRIVILEGED 0x02 + +/** + * @brief CONTROL value for un-privileged tasks. + * + * Bit[0] - 1 --> Thread mode is un-privileged. + * Bit[1] - 1 --> Thread mode uses PSP. + */ +#define securecontextCONTROL_VALUE_UNPRIVILEGED 0x03 + +/** + * @brief Size of stack seal values in bytes. + */ +#define securecontextSTACK_SEAL_SIZE 8 + +/** + * @brief Stack seal value as recommended by ARM. + */ +#define securecontextSTACK_SEAL_VALUE 0xFEF5EDA5 + +/** + * @brief Maximum number of secure contexts. + */ +#ifndef secureconfigMAX_SECURE_CONTEXTS + #define secureconfigMAX_SECURE_CONTEXTS 8UL +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Pre-allocated array of secure contexts. + */ +SecureContext_t xSecureContexts[ secureconfigMAX_SECURE_CONTEXTS ]; +/*-----------------------------------------------------------*/ + +/** + * @brief Get a free secure context for a task from the secure context pool (xSecureContexts). + * + * This function ensures that only one secure context is allocated for a task. + * + * @param[in] pvTaskHandle The task handle for which the secure context is allocated. + * + * @return Index of a free secure context in the xSecureContexts array. + */ +static uint32_t ulGetSecureContext( void * pvTaskHandle ); + +/** + * @brief Return the secure context to the secure context pool (xSecureContexts). + * + * @param[in] ulSecureContextIndex Index of the context in the xSecureContexts array. + */ +static void vReturnSecureContext( uint32_t ulSecureContextIndex ); + +/* These are implemented in assembly. */ +extern void SecureContext_LoadContextAsm( SecureContext_t * pxSecureContext ); +extern void SecureContext_SaveContextAsm( SecureContext_t * pxSecureContext ); +/*-----------------------------------------------------------*/ + +static uint32_t ulGetSecureContext( void * pvTaskHandle ) +{ + /* Start with invalid index. */ + uint32_t i, ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + + for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ ) + { + if( ( xSecureContexts[ i ].pucCurrentStackPointer == NULL ) && + ( xSecureContexts[ i ].pucStackLimit == NULL ) && + ( xSecureContexts[ i ].pucStackStart == NULL ) && + ( xSecureContexts[ i ].pvTaskHandle == NULL ) && + ( ulSecureContextIndex == secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = i; + } + else if( xSecureContexts[ i ].pvTaskHandle == pvTaskHandle ) + { + /* A task can only have one secure context. Do not allocate a second + * context for the same task. 
*/ + ulSecureContextIndex = secureconfigMAX_SECURE_CONTEXTS; + break; + } + } + + return ulSecureContextIndex; +} +/*-----------------------------------------------------------*/ + +static void vReturnSecureContext( uint32_t ulSecureContextIndex ) +{ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = NULL; + xSecureContexts[ ulSecureContextIndex ].pucStackLimit = NULL; + xSecureContexts[ ulSecureContextIndex ].pucStackStart = NULL; + xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = NULL; +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_Init( void ) +{ + uint32_t ulIPSR, i; + static uint32_t ulSecureContextsInitialized = 0; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ( ulIPSR != 0 ) && ( ulSecureContextsInitialized == 0 ) ) + { + /* Ensure to initialize secure contexts only once. */ + ulSecureContextsInitialized = 1; + + /* No stack for thread mode until a task's context is loaded. */ + secureportSET_PSPLIM( securecontextNO_STACK ); + secureportSET_PSP( securecontextNO_STACK ); + + /* Initialize all secure contexts. */ + for( i = 0; i < secureconfigMAX_SECURE_CONTEXTS; i++ ) + { + xSecureContexts[ i ].pucCurrentStackPointer = NULL; + xSecureContexts[ i ].pucStackLimit = NULL; + xSecureContexts[ i ].pucStackStart = NULL; + xSecureContexts[ i ].pvTaskHandle = NULL; + } + + #if ( configENABLE_MPU == 1 ) + { + /* Configure thread mode to use PSP and to be unprivileged. */ + secureportSET_CONTROL( securecontextCONTROL_VALUE_UNPRIVILEGED ); + } + #else /* configENABLE_MPU */ + { + /* Configure thread mode to use PSP and to be privileged. */ + secureportSET_CONTROL( securecontextCONTROL_VALUE_PRIVILEGED ); + } + #endif /* configENABLE_MPU */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + uint32_t ulIsTaskPrivileged, + void * pvTaskHandle ) +#else /* configENABLE_MPU */ + secureportNON_SECURE_CALLABLE SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + void * pvTaskHandle ) +#endif /* configENABLE_MPU */ +{ + uint8_t * pucStackMemory = NULL; + uint8_t * pucStackLimit; + uint32_t ulIPSR, ulSecureContextIndex; + SecureContextHandle_t xSecureContextHandle = securecontextINVALID_CONTEXT_ID; + + #if ( configENABLE_MPU == 1 ) + uint32_t * pulCurrentStackPointer = NULL; + #endif /* configENABLE_MPU */ + + /* Read the Interrupt Program Status Register (IPSR) and Process Stack Limit + * Register (PSPLIM) value. */ + secureportREAD_IPSR( ulIPSR ); + secureportREAD_PSPLIM( pucStackLimit ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. + * Also do nothing, if a secure context us already loaded. PSPLIM is set to + * securecontextNO_STACK when no secure context is loaded. */ + if( ( ulIPSR != 0 ) && ( pucStackLimit == securecontextNO_STACK ) ) + { + /* Ontain a free secure context. */ + ulSecureContextIndex = ulGetSecureContext( pvTaskHandle ); + + /* Were we able to get a free context? */ + if( ulSecureContextIndex < secureconfigMAX_SECURE_CONTEXTS ) + { + /* Allocate the stack space. 
*/ + pucStackMemory = pvPortMalloc( ulSecureStackSize + securecontextSTACK_SEAL_SIZE ); + + if( pucStackMemory != NULL ) + { + /* Since stack grows down, the starting point will be the last + * location. Note that this location is next to the last + * allocated byte for stack (excluding the space for seal values) + * because the hardware decrements the stack pointer before + * writing i.e. if stack pointer is 0x2, a push operation will + * decrement the stack pointer to 0x1 and then write at 0x1. */ + xSecureContexts[ ulSecureContextIndex ].pucStackStart = pucStackMemory + ulSecureStackSize; + + /* Seal the created secure process stack. */ + *( uint32_t * )( pucStackMemory + ulSecureStackSize ) = securecontextSTACK_SEAL_VALUE; + *( uint32_t * )( pucStackMemory + ulSecureStackSize + 4 ) = securecontextSTACK_SEAL_VALUE; + + /* The stack cannot go beyond this location. This value is + * programmed in the PSPLIM register on context switch.*/ + xSecureContexts[ ulSecureContextIndex ].pucStackLimit = pucStackMemory; + + xSecureContexts[ ulSecureContextIndex ].pvTaskHandle = pvTaskHandle; + + #if ( configENABLE_MPU == 1 ) + { + /* Store the correct CONTROL value for the task on the stack. + * This value is programmed in the CONTROL register on + * context switch. */ + pulCurrentStackPointer = ( uint32_t * ) xSecureContexts[ ulSecureContextIndex ].pucStackStart; + pulCurrentStackPointer--; + + if( ulIsTaskPrivileged ) + { + *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_PRIVILEGED; + } + else + { + *( pulCurrentStackPointer ) = securecontextCONTROL_VALUE_UNPRIVILEGED; + } + + /* Store the current stack pointer. This value is programmed in + * the PSP register on context switch. */ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = ( uint8_t * ) pulCurrentStackPointer; + } + #else /* configENABLE_MPU */ + { + /* Current SP is set to the starting of the stack. This + * value programmed in the PSP register on context switch. */ + xSecureContexts[ ulSecureContextIndex ].pucCurrentStackPointer = xSecureContexts[ ulSecureContextIndex ].pucStackStart; + } + #endif /* configENABLE_MPU */ + + /* Ensure to never return 0 as a valid context handle. */ + xSecureContextHandle = ulSecureContextIndex + 1UL; + } + } + } + + return xSecureContextHandle; +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint32_t ulIPSR, ulSecureContextIndex; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ulIPSR != 0 ) + { + /* Only free if a valid context handle is passed. */ + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + /* Ensure that the secure context being deleted is associated with + * the task. */ + if( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) + { + /* Free the stack space. */ + vPortFree( xSecureContexts[ ulSecureContextIndex ].pucStackLimit ); + + /* Return the secure context back to the free secure contexts pool. 
*/ + vReturnSecureContext( ulSecureContextIndex ); + } + } + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint8_t * pucStackLimit; + uint32_t ulSecureContextIndex; + + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + secureportREAD_PSPLIM( pucStackLimit ); + + /* Ensure that no secure context is loaded and the task is loading it's + * own context. */ + if( ( pucStackLimit == securecontextNO_STACK ) && + ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) ) + { + SecureContext_LoadContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) ); + } + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ) +{ + uint8_t * pucStackLimit; + uint32_t ulSecureContextIndex; + + if( ( xSecureContextHandle > 0UL ) && ( xSecureContextHandle <= secureconfigMAX_SECURE_CONTEXTS ) ) + { + ulSecureContextIndex = xSecureContextHandle - 1UL; + + secureportREAD_PSPLIM( pucStackLimit ); + + /* Ensure that task's context is loaded and the task is saving it's own + * context. */ + if( ( xSecureContexts[ ulSecureContextIndex ].pucStackLimit == pucStackLimit ) && + ( xSecureContexts[ ulSecureContextIndex ].pvTaskHandle == pvTaskHandle ) ) + { + SecureContext_SaveContextAsm( &( xSecureContexts[ ulSecureContextIndex ] ) ); + } + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P/secure/secure_context.h b/portable/IAR/ARM_CM35P/secure/secure_context.h new file mode 100644 index 00000000000..d0adbaf018f --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_context.h @@ -0,0 +1,135 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_CONTEXT_H__ +#define __SECURE_CONTEXT_H__ + +/* Standard includes. */ +#include + +/* FreeRTOS includes. */ +#include "FreeRTOSConfig.h" + +/** + * @brief PSP value when no secure context is loaded. 
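+ * PSPLIM is set to the same value when no context is loaded, which is how
+ * SecureContext_AllocateContext and SecureContext_LoadContext detect that
+ * no secure context is currently in use.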
+ */ +#define securecontextNO_STACK 0x0 + +/** + * @brief Invalid context ID. + */ +#define securecontextINVALID_CONTEXT_ID 0UL +/*-----------------------------------------------------------*/ + +/** + * @brief Structure to represent a secure context. + * + * @note Since stack grows down, pucStackStart is the highest address while + * pucStackLimit is the first address of the allocated memory. + */ +typedef struct SecureContext +{ + uint8_t * pucCurrentStackPointer; /**< Current value of stack pointer (PSP). */ + uint8_t * pucStackLimit; /**< Last location of the stack memory (PSPLIM). */ + uint8_t * pucStackStart; /**< First location of the stack memory. */ + void * pvTaskHandle; /**< Task handle of the task this context is associated with. */ +} SecureContext_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Opaque handle for a secure context. + */ +typedef uint32_t SecureContextHandle_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Initializes the secure context management system. + * + * PSP is set to NULL and therefore a task must allocate and load a context + * before calling any secure side function in the thread mode. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureContext_Init( void ); + +/** + * @brief Allocates a context on the secure side. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] ulSecureStackSize Size of the stack to allocate on secure side. + * @param[in] ulIsTaskPrivileged 1 if the calling task is privileged, 0 otherwise. + * + * @return Opaque context handle if context is successfully allocated, NULL + * otherwise. + */ +#if ( configENABLE_MPU == 1 ) + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + uint32_t ulIsTaskPrivileged, + void * pvTaskHandle ); +#else /* configENABLE_MPU */ + SecureContextHandle_t SecureContext_AllocateContext( uint32_t ulSecureStackSize, + void * pvTaskHandle ); +#endif /* configENABLE_MPU */ + +/** + * @brief Frees the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the + * context to be freed. + */ +void SecureContext_FreeContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Loads the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be loaded. + */ +void SecureContext_LoadContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +/** + * @brief Saves the given context. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + * + * @param[in] xSecureContextHandle Context handle corresponding to the context + * to be saved. 
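+ * @param[in] pvTaskHandle The non-secure task handle with which the context
+ * is associated; saving is refused if it does not match.
+ *
+ * @note On the non-secure side this is reached from PendSV_Handler (see
+ * portasm.s), which passes the current xSecureContext value and
+ * pxCurrentTCB as the two arguments.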
+ */ +void SecureContext_SaveContext( SecureContextHandle_t xSecureContextHandle, void * pvTaskHandle ); + +#endif /* __SECURE_CONTEXT_H__ */ diff --git a/portable/IAR/ARM_CM35P/secure/secure_context_port_asm.s b/portable/IAR/ARM_CM35P/secure/secure_context_port_asm.s new file mode 100644 index 00000000000..400bd0107a3 --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_context_port_asm.s @@ -0,0 +1,86 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + SECTION .text:CODE:NOROOT(2) + THUMB + +/* Including FreeRTOSConfig.h here will cause build errors if the header file +contains code not understood by the assembler - for example the 'extern' keyword. +To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so +the code is included in C files but excluded by the preprocessor in assembly +files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ +#include "FreeRTOSConfig.h" + + PUBLIC SecureContext_LoadContextAsm + PUBLIC SecureContext_SaveContextAsm +/*-----------------------------------------------------------*/ + +SecureContext_LoadContextAsm: + /* pxSecureContext value is in r0. */ + mrs r1, ipsr /* r1 = IPSR. */ + cbz r1, load_ctx_therad_mode /* Do nothing if the processor is running in the Thread Mode. */ + ldmia r0!, {r1, r2} /* r1 = pxSecureContext->pucCurrentStackPointer, r2 = pxSecureContext->pucStackLimit. */ + +#if ( configENABLE_MPU == 1 ) + ldmia r1!, {r3} /* Read CONTROL register value from task's stack. r3 = CONTROL. */ + msr control, r3 /* CONTROL = r3. */ +#endif /* configENABLE_MPU */ + + msr psplim, r2 /* PSPLIM = r2. */ + msr psp, r1 /* PSP = r1. */ + + load_ctx_therad_mode: + bx lr +/*-----------------------------------------------------------*/ + +SecureContext_SaveContextAsm: + /* pxSecureContext value is in r0. */ + mrs r1, ipsr /* r1 = IPSR. */ + cbz r1, save_ctx_therad_mode /* Do nothing if the processor is running in the Thread Mode. */ + mrs r1, psp /* r1 = PSP. */ + +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + vstmdb r1!, {s0} /* Trigger the deferred stacking of FPU registers. */ + vldmia r1!, {s0} /* Nullify the effect of the previous statement. 
*/ +#endif /* configENABLE_FPU || configENABLE_MVE */ + +#if ( configENABLE_MPU == 1 ) + mrs r2, control /* r2 = CONTROL. */ + stmdb r1!, {r2} /* Store CONTROL value on the stack. */ +#endif /* configENABLE_MPU */ + + str r1, [r0] /* Save the top of stack in context. pxSecureContext->pucCurrentStackPointer = r1. */ + movs r1, #0 /* r1 = securecontextNO_STACK. */ + msr psplim, r1 /* PSPLIM = securecontextNO_STACK. */ + msr psp, r1 /* PSP = securecontextNO_STACK i.e. No stack for thread mode until next task's context is loaded. */ + + save_ctx_therad_mode: + bx lr +/*-----------------------------------------------------------*/ + + END diff --git a/portable/IAR/ARM_CM35P/secure/secure_heap.c b/portable/IAR/ARM_CM35P/secure/secure_heap.c new file mode 100644 index 00000000000..157fdbf0eec --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_heap.c @@ -0,0 +1,454 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Secure context heap includes. */ +#include "secure_heap.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief Total heap size. + */ +#ifndef secureconfigTOTAL_HEAP_SIZE + #define secureconfigTOTAL_HEAP_SIZE ( ( ( size_t ) ( 10 * 1024 ) ) ) +#endif + +/* No test marker by default. */ +#ifndef mtCOVERAGE_TEST_MARKER + #define mtCOVERAGE_TEST_MARKER() +#endif + +/* No tracing by default. */ +#ifndef traceMALLOC + #define traceMALLOC( pvReturn, xWantedSize ) +#endif + +/* No tracing by default. */ +#ifndef traceFREE + #define traceFREE( pv, xBlockSize ) +#endif + +/* Block sizes must not get too small. */ +#define secureheapMINIMUM_BLOCK_SIZE ( ( size_t ) ( xHeapStructSize << 1 ) ) + +/* Assumes 8bit bytes! */ +#define secureheapBITS_PER_BYTE ( ( size_t ) 8 ) +/*-----------------------------------------------------------*/ + +/* Allocate the memory for the heap. */ +#if ( configAPPLICATION_ALLOCATED_HEAP == 1 ) + +/* The application writer has already defined the array used for the RTOS +* heap - probably so it can be placed in a special segment or address. 
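+*
+* In that case the application supplies the definition itself, for example
+* (an illustrative sketch - the IAR placement pragma and section name are
+* assumptions, not requirements of this file):
+*
+*     #pragma location = ".secure_heap"
+*     uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ];
+*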
*/ + extern uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ]; +#else /* configAPPLICATION_ALLOCATED_HEAP */ + static uint8_t ucHeap[ secureconfigTOTAL_HEAP_SIZE ]; +#endif /* configAPPLICATION_ALLOCATED_HEAP */ + +/** + * @brief The linked list structure. + * + * This is used to link free blocks in order of their memory address. + */ +typedef struct A_BLOCK_LINK +{ + struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */ + size_t xBlockSize; /**< The size of the free block. */ +} BlockLink_t; +/*-----------------------------------------------------------*/ + +/** + * @brief Called automatically to setup the required heap structures the first + * time pvPortMalloc() is called. + */ +static void prvHeapInit( void ); + +/** + * @brief Inserts a block of memory that is being freed into the correct + * position in the list of free memory blocks. + * + * The block being freed will be merged with the block in front it and/or the + * block behind it if the memory blocks are adjacent to each other. + * + * @param[in] pxBlockToInsert The block being freed. + */ +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ); +/*-----------------------------------------------------------*/ + +/** + * @brief The size of the structure placed at the beginning of each allocated + * memory block must by correctly byte aligned. + */ +static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( secureportBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + +/** + * @brief Create a couple of list links to mark the start and end of the list. + */ +static BlockLink_t xStart; +static BlockLink_t * pxEnd = NULL; + +/** + * @brief Keeps track of the number of free bytes remaining, but says nothing + * about fragmentation. + */ +static size_t xFreeBytesRemaining = 0U; +static size_t xMinimumEverFreeBytesRemaining = 0U; + +/** + * @brief Gets set to the top bit of an size_t type. + * + * When this bit in the xBlockSize member of an BlockLink_t structure is set + * then the block belongs to the application. When the bit is free the block is + * still part of the free heap space. + */ +static size_t xBlockAllocatedBit = 0; +/*-----------------------------------------------------------*/ + +static void prvHeapInit( void ) +{ + BlockLink_t * pxFirstFreeBlock; + uint8_t * pucAlignedHeap; + size_t uxAddress; + size_t xTotalHeapSize = secureconfigTOTAL_HEAP_SIZE; + + /* Ensure the heap starts on a correctly aligned boundary. */ + uxAddress = ( size_t ) ucHeap; + + if( ( uxAddress & secureportBYTE_ALIGNMENT_MASK ) != 0 ) + { + uxAddress += ( secureportBYTE_ALIGNMENT - 1 ); + uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + xTotalHeapSize -= uxAddress - ( size_t ) ucHeap; + } + + pucAlignedHeap = ( uint8_t * ) uxAddress; + + /* xStart is used to hold a pointer to the first item in the list of free + * blocks. The void cast is used to prevent compiler warnings. */ + xStart.pxNextFreeBlock = ( void * ) pucAlignedHeap; + xStart.xBlockSize = ( size_t ) 0; + + /* pxEnd is used to mark the end of the list of free blocks and is inserted + * at the end of the heap space. */ + uxAddress = ( ( size_t ) pucAlignedHeap ) + xTotalHeapSize; + uxAddress -= xHeapStructSize; + uxAddress &= ~( ( size_t ) secureportBYTE_ALIGNMENT_MASK ); + pxEnd = ( void * ) uxAddress; + pxEnd->xBlockSize = 0; + pxEnd->pxNextFreeBlock = NULL; + + /* To start with there is a single free block that is sized to take up the + * entire heap space, minus the space taken by pxEnd. 
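The fix-up at the top of prvHeapInit() is plain add-then-mask rounding to the next 8-byte boundary (secureportBYTE_ALIGNMENT is 8 and secureportBYTE_ALIGNMENT_MASK is 0x0007, as defined in secure_port_macros.h later in this patch). A self-contained demonstration of the same arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGNMENT         8u
    #define ALIGNMENT_MASK    0x0007u

    int main( void )
    {
        uintptr_t uxAddress = 0x20001003u; /* Deliberately misaligned start address. */

        if( ( uxAddress & ALIGNMENT_MASK ) != 0u )
        {
            uxAddress += ( ALIGNMENT - 1u );             /* 0x2000100A */
            uxAddress &= ~( uintptr_t ) ALIGNMENT_MASK;  /* 0x20001008, the next 8-byte boundary. */
        }

        printf( "Aligned address: 0x%lx\n", ( unsigned long ) uxAddress );
        return 0;
    }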
*/ + pxFirstFreeBlock = ( void * ) pucAlignedHeap; + pxFirstFreeBlock->xBlockSize = uxAddress - ( size_t ) pxFirstFreeBlock; + pxFirstFreeBlock->pxNextFreeBlock = pxEnd; + + /* Only one block exists - and it covers the entire usable heap space. */ + xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize; + + /* Work out the position of the top bit in a size_t variable. */ + xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * secureheapBITS_PER_BYTE ) - 1 ); +} +/*-----------------------------------------------------------*/ + +static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) +{ + BlockLink_t * pxIterator; + uint8_t * puc; + + /* Iterate through the list until a block is found that has a higher address + * than the block being inserted. */ + for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock ) + { + /* Nothing to do here, just iterate to the right position. */ + } + + /* Do the block being inserted, and the block it is being inserted after + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxIterator; + + if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert ) + { + pxIterator->xBlockSize += pxBlockToInsert->xBlockSize; + pxBlockToInsert = pxIterator; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Do the block being inserted, and the block it is being inserted before + * make a contiguous block of memory? */ + puc = ( uint8_t * ) pxBlockToInsert; + + if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock ) + { + if( pxIterator->pxNextFreeBlock != pxEnd ) + { + /* Form one big block from the two blocks. */ + pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize; + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock; + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxEnd; + } + } + else + { + pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; + } + + /* If the block being inserted plugged a gab, so was merged with the block + * before and the block after, then it's pxNextFreeBlock pointer will have + * already been set, and should not be set here as that would make it point + * to itself. */ + if( pxIterator != pxBlockToInsert ) + { + pxIterator->pxNextFreeBlock = pxBlockToInsert; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +void * pvPortMalloc( size_t xWantedSize ) +{ + BlockLink_t * pxBlock; + BlockLink_t * pxPreviousBlock; + BlockLink_t * pxNewBlockLink; + void * pvReturn = NULL; + + /* If this is the first call to malloc then the heap will require + * initialisation to setup the list of free blocks. */ + if( pxEnd == NULL ) + { + prvHeapInit(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Check the requested block size is not so large that the top bit is set. + * The top bit of the block size member of the BlockLink_t structure is used + * to determine who owns the block - the application or the kernel, so it + * must be free. */ + if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) + { + /* The wanted size is increased so it can contain a BlockLink_t + * structure in addition to the requested amount of bytes. */ + if( xWantedSize > 0 ) + { + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number of + * bytes. 
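prvHeapInit() places xBlockAllocatedBit at the most significant bit of a size_t, and pvPortMalloc()/vPortFree() below use that bit to tag a block as owned by the application (which is also why a request with the top bit set is rejected). A standalone illustration of the tagging scheme:

    #include <stddef.h>
    #include <stdio.h>

    int main( void )
    {
        /* Same computation as prvHeapInit(): the top bit of a size_t. */
        const size_t xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * 8u ) - 1u );
        size_t xBlockSize = 128u;

        xBlockSize |= xBlockAllocatedBit;    /* Marked allocated, as in pvPortMalloc(). */
        printf( "allocated: %d\n", ( xBlockSize & xBlockAllocatedBit ) != 0u );

        xBlockSize &= ~xBlockAllocatedBit;   /* Cleared again, as in vPortFree(). */
        printf( "size after free: %zu\n", xBlockSize );
        return 0;
    }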
*/ + if( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. */ + xWantedSize += ( secureportBYTE_ALIGNMENT - ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) ); + secureportASSERT( ( xWantedSize & secureportBYTE_ALIGNMENT_MASK ) == 0 ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) + { + /* Traverse the list from the start (lowest address) block until + * one of adequate size is found. */ + pxPreviousBlock = &xStart; + pxBlock = xStart.pxNextFreeBlock; + + while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) + { + pxPreviousBlock = pxBlock; + pxBlock = pxBlock->pxNextFreeBlock; + } + + /* If the end marker was reached then a block of adequate size was + * not found. */ + if( pxBlock != pxEnd ) + { + /* Return the memory space pointed to - jumping over the + * BlockLink_t structure at its start. */ + pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); + + /* This block is being returned for use so must be taken out + * of the list of free blocks. */ + pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; + + /* If the block is larger than required it can be split into + * two. */ + if( ( pxBlock->xBlockSize - xWantedSize ) > secureheapMINIMUM_BLOCK_SIZE ) + { + /* This block is to be split into two. Create a new + * block following the number of bytes requested. The void + * cast is used to prevent byte alignment warnings from the + * compiler. */ + pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); + secureportASSERT( ( ( ( size_t ) pxNewBlockLink ) & secureportBYTE_ALIGNMENT_MASK ) == 0 ); + + /* Calculate the sizes of two blocks split from the single + * block. */ + pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; + pxBlock->xBlockSize = xWantedSize; + + /* Insert the new block into the list of free blocks. */ + prvInsertBlockIntoFreeList( pxNewBlockLink ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xFreeBytesRemaining -= pxBlock->xBlockSize; + + if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) + { + xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The block is being returned - it is allocated and owned by + * the application and has no "next" block. */ + pxBlock->xBlockSize |= xBlockAllocatedBit; + pxBlock->pxNextFreeBlock = NULL; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + traceMALLOC( pvReturn, xWantedSize ); + + #if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) + { + if( pvReturn == NULL ) + { + extern void vApplicationMallocFailedHook( void ); + vApplicationMallocFailedHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( secureconfigUSE_MALLOC_FAILED_HOOK == 1 ) */ + + secureportASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) secureportBYTE_ALIGNMENT_MASK ) == 0 ); + return pvReturn; +} +/*-----------------------------------------------------------*/ + +void vPortFree( void * pv ) +{ + uint8_t * puc = ( uint8_t * ) pv; + BlockLink_t * pxLink; + + if( pv != NULL ) + { + /* The memory being freed will have an BlockLink_t structure immediately + * before it. */ + puc -= xHeapStructSize; + + /* This casting is to keep the compiler from issuing warnings. 
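When secureconfigUSE_MALLOC_FAILED_HOOK is defined as 1, a failed secure-side allocation calls vApplicationMallocFailedHook(), which the application must supply. A minimal sketch of such a hook (the halt-forever behaviour is illustrative; a product might log and recover instead):

    /* Secure-side application code, built with secureconfigUSE_MALLOC_FAILED_HOOK
     * defined as 1 (for example via a -D build option). */
    void vApplicationMallocFailedHook( void )
    {
        /* The secure heap is exhausted - stop here so the failure is obvious
         * during development. */
        for( ; ; )
        {
        }
    }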
*/ + pxLink = ( void * ) puc; + + /* Check the block is actually allocated. */ + secureportASSERT( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ); + secureportASSERT( pxLink->pxNextFreeBlock == NULL ); + + if( ( pxLink->xBlockSize & xBlockAllocatedBit ) != 0 ) + { + if( pxLink->pxNextFreeBlock == NULL ) + { + /* The block is being returned to the heap - it is no longer + * allocated. */ + pxLink->xBlockSize &= ~xBlockAllocatedBit; + + secureportDISABLE_NON_SECURE_INTERRUPTS(); + { + /* Add this block to the list of free blocks. */ + xFreeBytesRemaining += pxLink->xBlockSize; + traceFREE( pv, pxLink->xBlockSize ); + prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); + } + secureportENABLE_NON_SECURE_INTERRUPTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } +} +/*-----------------------------------------------------------*/ + +size_t xPortGetFreeHeapSize( void ) +{ + return xFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ + +size_t xPortGetMinimumEverFreeHeapSize( void ) +{ + return xMinimumEverFreeBytesRemaining; +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P/secure/secure_heap.h b/portable/IAR/ARM_CM35P/secure/secure_heap.h new file mode 100644 index 00000000000..c13590f86ad --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_heap.h @@ -0,0 +1,66 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_HEAP_H__ +#define __SECURE_HEAP_H__ + +/* Standard includes. */ +#include + +/** + * @brief Allocates memory from heap. + * + * @param[in] xWantedSize The size of the memory to be allocated. + * + * @return Pointer to the memory region if the allocation is successful, NULL + * otherwise. + */ +void * pvPortMalloc( size_t xWantedSize ); + +/** + * @brief Frees the previously allocated memory. + * + * @param[in] pv Pointer to the memory to be freed. + */ +void vPortFree( void * pv ); + +/** + * @brief Get the free heap size. + * + * @return Free heap size. + */ +size_t xPortGetFreeHeapSize( void ); + +/** + * @brief Get the minimum ever free heap size. + * + * @return Minimum ever free heap size. 
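The two query functions above make it straightforward to size the secure heap empirically: run the worst-case workload, read the low-water mark, then trim secureconfigTOTAL_HEAP_SIZE with some margin. A short usage sketch (the wrapper function name is illustrative):

    #include "secure_heap.h"

    /* Returns the worst-case remaining secure heap observed so far; the current
     * free figure is available from xPortGetFreeHeapSize() in the same way. */
    size_t xGetSecureHeapLowWaterMark( void )
    {
        return xPortGetMinimumEverFreeHeapSize();
    }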
+ */ +size_t xPortGetMinimumEverFreeHeapSize( void ); + +#endif /* __SECURE_HEAP_H__ */ diff --git a/portable/IAR/ARM_CM35P/secure/secure_init.c b/portable/IAR/ARM_CM35P/secure/secure_init.c new file mode 100644 index 00000000000..dc19ebc7d5e --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_init.c @@ -0,0 +1,106 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Standard includes. */ +#include + +/* Secure init includes. */ +#include "secure_init.h" + +/* Secure port macros. */ +#include "secure_port_macros.h" + +/** + * @brief Constants required to manipulate the SCB. + */ +#define secureinitSCB_AIRCR ( ( volatile uint32_t * ) 0xe000ed0c ) /* Application Interrupt and Reset Control Register. */ +#define secureinitSCB_AIRCR_VECTKEY_POS ( 16UL ) +#define secureinitSCB_AIRCR_VECTKEY_MASK ( 0xFFFFUL << secureinitSCB_AIRCR_VECTKEY_POS ) +#define secureinitSCB_AIRCR_PRIS_POS ( 14UL ) +#define secureinitSCB_AIRCR_PRIS_MASK ( 1UL << secureinitSCB_AIRCR_PRIS_POS ) + +/** + * @brief Constants required to manipulate the FPU. + */ +#define secureinitFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define secureinitFPCCR_LSPENS_POS ( 29UL ) +#define secureinitFPCCR_LSPENS_MASK ( 1UL << secureinitFPCCR_LSPENS_POS ) +#define secureinitFPCCR_TS_POS ( 26UL ) +#define secureinitFPCCR_TS_MASK ( 1UL << secureinitFPCCR_TS_POS ) + +#define secureinitNSACR ( ( volatile uint32_t * ) 0xe000ed8c ) /* Non-secure Access Control Register. */ +#define secureinitNSACR_CP10_POS ( 10UL ) +#define secureinitNSACR_CP10_MASK ( 1UL << secureinitNSACR_CP10_POS ) +#define secureinitNSACR_CP11_POS ( 11UL ) +#define secureinitNSACR_CP11_MASK ( 1UL << secureinitNSACR_CP11_POS ) +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureInit_DePrioritizeNSExceptions( void ) +{ + uint32_t ulIPSR; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. 
*/ + if( ulIPSR != 0 ) + { + *( secureinitSCB_AIRCR ) = ( *( secureinitSCB_AIRCR ) & ~( secureinitSCB_AIRCR_VECTKEY_MASK | secureinitSCB_AIRCR_PRIS_MASK ) ) | + ( ( 0x05FAUL << secureinitSCB_AIRCR_VECTKEY_POS ) & secureinitSCB_AIRCR_VECTKEY_MASK ) | + ( ( 0x1UL << secureinitSCB_AIRCR_PRIS_POS ) & secureinitSCB_AIRCR_PRIS_MASK ); + } +} +/*-----------------------------------------------------------*/ + +secureportNON_SECURE_CALLABLE void SecureInit_EnableNSFPUAccess( void ) +{ + uint32_t ulIPSR; + + /* Read the Interrupt Program Status Register (IPSR) value. */ + secureportREAD_IPSR( ulIPSR ); + + /* Do nothing if the processor is running in the Thread Mode. IPSR is zero + * when the processor is running in the Thread Mode. */ + if( ulIPSR != 0 ) + { + /* CP10 = 1 ==> Non-secure access to the Floating Point Unit is + * permitted. CP11 should be programmed to the same value as CP10. */ + *( secureinitNSACR ) |= ( secureinitNSACR_CP10_MASK | secureinitNSACR_CP11_MASK ); + + /* LSPENS = 0 ==> LSPEN is writable fron non-secure state. This ensures + * that we can enable/disable lazy stacking in port.c file. */ + *( secureinitFPCCR ) &= ~( secureinitFPCCR_LSPENS_MASK ); + + /* TS = 1 ==> Treat FP registers as secure i.e. callee saved FP + * registers (S16-S31) are also pushed to stack on exception entry and + * restored on exception return. */ + *( secureinitFPCCR ) |= ( secureinitFPCCR_TS_MASK ); + } +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P/secure/secure_init.h b/portable/IAR/ARM_CM35P/secure/secure_init.h new file mode 100644 index 00000000000..21daeda6b89 --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_init.h @@ -0,0 +1,54 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_INIT_H__ +#define __SECURE_INIT_H__ + +/** + * @brief De-prioritizes the non-secure exceptions. + * + * This is needed to ensure that the non-secure PendSV runs at the lowest + * priority. Context switch is done in the non-secure PendSV handler. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_DePrioritizeNSExceptions( void ); + +/** + * @brief Sets up the Floating Point Unit (FPU) for Non-Secure access. 
+ * + * Also sets FPCCR.TS=1 to ensure that the content of the Floating Point + * Registers are not leaked to the non-secure side. + * + * @note This function must be called in the handler mode. It is no-op if called + * in the thread mode. + */ +void SecureInit_EnableNSFPUAccess( void ); + +#endif /* __SECURE_INIT_H__ */ diff --git a/portable/IAR/ARM_CM35P/secure/secure_port_macros.h b/portable/IAR/ARM_CM35P/secure/secure_port_macros.h new file mode 100644 index 00000000000..304913b8dbf --- /dev/null +++ b/portable/IAR/ARM_CM35P/secure/secure_port_macros.h @@ -0,0 +1,140 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __SECURE_PORT_MACROS_H__ +#define __SECURE_PORT_MACROS_H__ + +/** + * @brief Byte alignment requirements. + */ +#define secureportBYTE_ALIGNMENT 8 +#define secureportBYTE_ALIGNMENT_MASK ( 0x0007 ) + +/** + * @brief Macro to declare a function as non-secure callable. + */ +#if defined( __IAR_SYSTEMS_ICC__ ) + #define secureportNON_SECURE_CALLABLE __cmse_nonsecure_entry __root +#else + #define secureportNON_SECURE_CALLABLE __attribute__( ( cmse_nonsecure_entry ) ) __attribute__( ( used ) ) +#endif + +/** + * @brief Set the secure PRIMASK value. + */ +#define secureportSET_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Set the non-secure PRIMASK value. + */ +#define secureportSET_NON_SECURE_PRIMASK( ulPrimaskValue ) \ + __asm volatile ( "msr primask_ns, %0" : : "r" ( ulPrimaskValue ) : "memory" ) + +/** + * @brief Read the PSP value in the given variable. + */ +#define secureportREAD_PSP( pucOutCurrentStackPointer ) \ + __asm volatile ( "mrs %0, psp" : "=r" ( pucOutCurrentStackPointer ) ) + +/** + * @brief Set the PSP to the given value. + */ +#define secureportSET_PSP( pucCurrentStackPointer ) \ + __asm volatile ( "msr psp, %0" : : "r" ( pucCurrentStackPointer ) ) + +/** + * @brief Read the PSPLIM value in the given variable. + */ +#define secureportREAD_PSPLIM( pucOutStackLimit ) \ + __asm volatile ( "mrs %0, psplim" : "=r" ( pucOutStackLimit ) ) + +/** + * @brief Set the PSPLIM to the given value. 
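secureportNON_SECURE_CALLABLE is the macro the secure-side sources in this patch use to export entry points to the non-secure image. A hedged sketch of an application-defined entry function using the same macro (the function itself is hypothetical, and the secure project is assumed to be built with the toolchain's CMSE/TrustZone support enabled so the secure gateway veneer is generated):

    #include <stdint.h>
    #include "secure_port_macros.h"

    /* Hypothetical non-secure callable: reachable from the non-secure image
     * via the generated secure gateway veneer. */
    secureportNON_SECURE_CALLABLE uint32_t ulSecureExample_ReadCounter( void )
    {
        static uint32_t ulCounter = 0;

        ulCounter++;
        return ulCounter;
    }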
+ */ +#define secureportSET_PSPLIM( pucStackLimit ) \ + __asm volatile ( "msr psplim, %0" : : "r" ( pucStackLimit ) ) + +/** + * @brief Set the NonSecure MSP to the given value. + */ +#define secureportSET_MSP_NS( pucMainStackPointer ) \ + __asm volatile ( "msr msp_ns, %0" : : "r" ( pucMainStackPointer ) ) + +/** + * @brief Set the CONTROL register to the given value. + */ +#define secureportSET_CONTROL( ulControl ) \ + __asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" ) + +/** + * @brief Read the Interrupt Program Status Register (IPSR) value in the given + * variable. + */ +#define secureportREAD_IPSR( ulIPSR ) \ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIPSR ) ) + +/** + * @brief PRIMASK value to enable interrupts. + */ +#define secureportPRIMASK_ENABLE_INTERRUPTS_VAL 0 + +/** + * @brief PRIMASK value to disable interrupts. + */ +#define secureportPRIMASK_DISABLE_INTERRUPTS_VAL 1 + +/** + * @brief Disable secure interrupts. + */ +#define secureportDISABLE_SECURE_INTERRUPTS() secureportSET_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Disable non-secure interrupts. + * + * This effectively disables context switches. + */ +#define secureportDISABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_DISABLE_INTERRUPTS_VAL ) + +/** + * @brief Enable non-secure interrupts. + */ +#define secureportENABLE_NON_SECURE_INTERRUPTS() secureportSET_NON_SECURE_PRIMASK( secureportPRIMASK_ENABLE_INTERRUPTS_VAL ) + +/** + * @brief Assert definition. + */ +#define secureportASSERT( x ) \ + if( ( x ) == 0 ) \ + { \ + secureportDISABLE_SECURE_INTERRUPTS(); \ + secureportDISABLE_NON_SECURE_INTERRUPTS(); \ + for( ; ; ) {; } \ + } + +#endif /* __SECURE_PORT_MACROS_H__ */ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c new file mode 100644 index 00000000000..9976daee49a --- /dev/null +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c @@ -0,0 +1,1261 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. 
*/ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/* Portasm includes. */ +#include "portasm.h" + +#if ( configENABLE_TRUSTZONE == 1 ) + /* Secure components includes. */ + #include "secure_context.h" + #include "secure_init.h" +#endif /* configENABLE_TRUSTZONE */ + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/** + * The FreeRTOS Cortex M33 port can be configured to run on the Secure Side only + * i.e. the processor boots as secure and never jumps to the non-secure side. + * The Trust Zone support in the port must be disabled in order to run FreeRTOS + * on the secure side. The following are the valid configuration seetings: + * + * 1. Run FreeRTOS on the Secure Side: + * configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0 + * + * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1 + * + * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support: + * configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0 + */ +#if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) ) + #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the NVIC. + */ +#define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) +#define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) +#define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) ) +#define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) ) +#define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL ) +#define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL ) +#define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL ) +#define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL ) +#define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) +#define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL ) +#define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the SCB. + */ +#define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( volatile uint32_t * ) 0xe000ed24 ) +#define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the FPU. + */ +#define portCPACR ( ( volatile uint32_t * ) 0xe000ed88 ) /* Coprocessor Access Control Register. */ +#define portCPACR_CP10_VALUE ( 3UL ) +#define portCPACR_CP11_VALUE portCPACR_CP10_VALUE +#define portCPACR_CP10_POS ( 20UL ) +#define portCPACR_CP11_POS ( 22UL ) + +#define portFPCCR ( ( volatile uint32_t * ) 0xe000ef34 ) /* Floating Point Context Control Register. */ +#define portFPCCR_ASPEN_POS ( 31UL ) +#define portFPCCR_ASPEN_MASK ( 1UL << portFPCCR_ASPEN_POS ) +#define portFPCCR_LSPEN_POS ( 30UL ) +#define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to manipulate the MPU. 
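The comment block above enumerates the only three supported combinations of configRUN_FREERTOS_SECURE_ONLY and configENABLE_TRUSTZONE, and the #error enforces the invalid fourth. A FreeRTOSConfig.h fragment for option 2 (kernel on the non-secure side with secure function call support) would look like this sketch:

    /* FreeRTOSConfig.h fragment (non-secure project) - option 2 above. */
    #define configRUN_FREERTOS_SECURE_ONLY    0
    #define configENABLE_TRUSTZONE            1

    /* Option 1 (secure side only) would use 1 / 0, and option 3 (non-secure
     * with no secure calls) 0 / 0, per the list above. */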
+ */ +#define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) ) +#define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) ) +#define portMPU_RNR_REG ( *( ( volatile uint32_t * ) 0xe000ed98 ) ) + +#define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) ) +#define portMPU_RLAR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) ) + +#define portMPU_RBAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda4 ) ) +#define portMPU_RLAR_A1_REG ( *( ( volatile uint32_t * ) 0xe000eda8 ) ) + +#define portMPU_RBAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edac ) ) +#define portMPU_RLAR_A2_REG ( *( ( volatile uint32_t * ) 0xe000edb0 ) ) + +#define portMPU_RBAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb4 ) ) +#define portMPU_RLAR_A3_REG ( *( ( volatile uint32_t * ) 0xe000edb8 ) ) + +#define portMPU_MAIR0_REG ( *( ( volatile uint32_t * ) 0xe000edc0 ) ) +#define portMPU_MAIR1_REG ( *( ( volatile uint32_t * ) 0xe000edc4 ) ) + +#define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ + +#define portMPU_MAIR_ATTR0_POS ( 0UL ) +#define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR1_POS ( 8UL ) +#define portMPU_MAIR_ATTR1_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR2_POS ( 16UL ) +#define portMPU_MAIR_ATTR2_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR3_POS ( 24UL ) +#define portMPU_MAIR_ATTR3_MASK ( 0xff000000 ) + +#define portMPU_MAIR_ATTR4_POS ( 0UL ) +#define portMPU_MAIR_ATTR4_MASK ( 0x000000ff ) + +#define portMPU_MAIR_ATTR5_POS ( 8UL ) +#define portMPU_MAIR_ATTR5_MASK ( 0x0000ff00 ) + +#define portMPU_MAIR_ATTR6_POS ( 16UL ) +#define portMPU_MAIR_ATTR6_MASK ( 0x00ff0000 ) + +#define portMPU_MAIR_ATTR7_POS ( 24UL ) +#define portMPU_MAIR_ATTR7_MASK ( 0xff000000 ) + +#define portMPU_RLAR_ATTR_INDEX0 ( 0UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX1 ( 1UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX2 ( 2UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX3 ( 3UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX4 ( 4UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX5 ( 5UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX6 ( 6UL << 1UL ) +#define portMPU_RLAR_ATTR_INDEX7 ( 7UL << 1UL ) + +#define portMPU_RLAR_REGION_ENABLE ( 1UL ) + +/* Enable privileged access to unmapped region. */ +#define portMPU_PRIV_BACKGROUND_ENABLE_BIT ( 1UL << 2UL ) + +/* Enable MPU. */ +#define portMPU_ENABLE_BIT ( 1UL << 0UL ) + +/* Expected value of the portMPU_TYPE register. */ +#define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief The maximum 24-bit number. + * + * It is needed because the systick is a 24-bit counter. + */ +#define portMAX_24_BIT_NUMBER ( 0xffffffUL ) + +/** + * @brief A fiddle factor to estimate the number of SysTick counts that would + * have occurred while the SysTick counter is stopped during tickless idle + * calculations. + */ +#define portMISSED_COUNTS_FACTOR ( 94UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Constants required to set up the initial stack. + */ +#define portINITIAL_XPSR ( 0x01000000 ) + +#if ( configRUN_FREERTOS_SECURE_ONLY == 1 ) + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF FD + * 1111 1111 1111 1111 1111 1111 1111 1101 + * + * Bit[6] - 1 --> The exception was taken from the Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. 
+ * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 1 --> The exception was taken to the Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xfffffffd ) +#else + +/** + * @brief Initial EXC_RETURN value. + * + * FF FF FF BC + * 1111 1111 1111 1111 1111 1111 1011 1100 + * + * Bit[6] - 0 --> The exception was taken from the Non-Secure state. + * Bit[5] - 1 --> Do not skip stacking of additional state context. + * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context. + * Bit[3] - 1 --> Return to the Thread mode. + * Bit[2] - 1 --> Restore registers from the process stack. + * Bit[1] - 0 --> Reserved, 0. + * Bit[0] - 0 --> The exception was taken to the Non-Secure state. + */ + #define portINITIAL_EXC_RETURN ( 0xffffffbc ) +#endif /* configRUN_FREERTOS_SECURE_ONLY */ + +/** + * @brief CONTROL register privileged bit mask. + * + * Bit[0] in CONTROL register tells the privilege: + * Bit[0] = 0 ==> The task is privileged. + * Bit[0] = 1 ==> The task is not privileged. + */ +#define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL ) + +/** + * @brief Initial CONTROL register values. + */ +#define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 ) +#define portINITIAL_CONTROL_PRIVILEGED ( 0x2 ) + +/** + * @brief Let the user override the default SysTick clock rate. If defined by the + * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the + * configuration register. + */ +#ifndef configSYSTICK_CLOCK_HZ + #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ ) + /* Ensure the SysTick is clocked at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT ) +#else + /* Select the option to clock SysTick not at the same frequency as the core. */ + #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 ) +#endif + +/** + * @brief Let the user override the pre-loading of the initial LR with the + * address of prvTaskExitError() in case it messes up unwinding of the stack + * in the debugger. + */ +#ifdef configTASK_RETURN_ADDRESS + #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS +#else + #define portTASK_RETURN_ADDRESS prvTaskExitError +#endif + +/** + * @brief If portPRELOAD_REGISTERS then registers will be given an initial value + * when a task is created. This helps in debugging at the cost of code size. + */ +#define portPRELOAD_REGISTERS 1 + +/** + * @brief A task is created without a secure context, and must call + * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes + * any secure calls. + */ +#define portNO_SECURE_CONTEXT 0 +/*-----------------------------------------------------------*/ + +/** + * @brief Used to catch tasks that attempt to return from their implementing + * function. + */ +static void prvTaskExitError( void ); + +#if ( configENABLE_MPU == 1 ) + +/** + * @brief Setup the Memory Protection Unit (MPU). + */ + static void prvSetupMPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_FPU == 1 ) + +/** + * @brief Setup the Floating Point Unit (FPU). + */ + static void prvSetupFPU( void ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_FPU */ + +/** + * @brief Setup the timer to generate the tick interrupts. + * + * The implementation in this file is weak to allow application writers to + * change the timer used to generate the tick interrupt. 
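configSYSTICK_CLOCK_HZ and configTASK_RETURN_ADDRESS are both opt-in overrides picked up from FreeRTOSConfig.h; when configSYSTICK_CLOCK_HZ is supplied the port leaves the SysTick CLK bit clear so the timer runs from the external reference clock. A hedged sketch (the 32.768 kHz value and the handler name are examples only):

    /* FreeRTOSConfig.h fragment - only needed when overriding the defaults. */

    /* SysTick clocked from a 32.768 kHz reference rather than the core clock. */
    #define configSYSTICK_CLOCK_HZ    32768UL

    /* Send tasks that mistakenly return from their function to an application
     * handler instead of the port's prvTaskExitError(). */
    #ifdef __ICCARM__ /* Keep the prototype away from the IAR assembler, per the
                       * note in secure_context_port_asm.s earlier in this patch. */
        void vAssertTaskExited( void );
    #endif
    #define configTASK_RETURN_ADDRESS    vAssertTaskExited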
+ */ +void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether the current execution context is interrupt. + * + * @return pdTRUE if the current execution context is interrupt, pdFALSE + * otherwise. + */ +BaseType_t xPortIsInsideInterrupt( void ); + +/** + * @brief Yield the processor. + */ +void vPortYield( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Enter critical section. + */ +void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Exit from critical section. + */ +void vPortExitCritical( void ) PRIVILEGED_FUNCTION; + +/** + * @brief SysTick handler. + */ +void SysTick_Handler( void ) PRIVILEGED_FUNCTION; + +/** + * @brief C part of SVC handler. + */ +portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; +/*-----------------------------------------------------------*/ + +/** + * @brief Each task maintains its own interrupt status in the critical nesting + * variable. + */ +PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; + +#if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Saved as part of the task context to indicate which context the + * task is using on the secure side. + */ + PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; +#endif /* configENABLE_TRUSTZONE */ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + +/** + * @brief The number of SysTick increments that make up one tick period. + */ + PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0; + +/** + * @brief The maximum number of tick periods that can be suppressed is + * limited by the 24 bit resolution of the SysTick timer. + */ + PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0; + +/** + * @brief Compensate for the CPU cycles that pass while the SysTick is + * stopped (low power functionality only). + */ + PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0; +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE == 1 ) + __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ) + { + uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft; + TickType_t xModifiableIdleTime; + + /* Make sure the SysTick reload value does not overflow the counter. */ + if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks ) + { + xExpectedIdleTime = xMaximumPossibleSuppressedTicks; + } + + /* Enter a critical section but don't use the taskENTER_CRITICAL() + * method as that will mask interrupts that should exit sleep mode. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* If a context switch is pending or a task is waiting for the scheduler + * to be unsuspended then abandon the low power entry. */ + if( eTaskConfirmSleepModeStatus() == eAbortSleep ) + { + /* Re-enable interrupts - see comments above the cpsid instruction + * above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + else + { + /* Stop the SysTick momentarily. The time the SysTick is stopped for + * is accounted for as best it can be, but using the tickless mode will + * inevitably result in some tiny drift of the time maintained by the + * kernel with respect to calendar time. 
*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Use the SysTick current-value register to determine the number of + * SysTick decrements remaining until the next tick interrupt. If the + * current-value register is zero, then there are actually + * ulTimerCountsForOneTick decrements remaining, not zero, because the + * SysTick requests the interrupt when decrementing from 1 to 0. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulTimerCountsForOneTick; + } + + /* Calculate the reload value required to wait xExpectedIdleTime + * tick periods. -1 is used because this code normally executes part + * way through the first tick period. But if the SysTick IRQ is now + * pending, then clear the IRQ, suppressing the first tick, and correct + * the reload value to reflect that the second tick period is already + * underway. The expected idle time is always at least two ticks. */ + ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) ); + + if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 ) + { + portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT; + ulReloadValue -= ulTimerCountsForOneTick; + } + + if( ulReloadValue > ulStoppedTimerCompensation ) + { + ulReloadValue -= ulStoppedTimerCompensation; + } + + /* Set the new reload value. */ + portNVIC_SYSTICK_LOAD_REG = ulReloadValue; + + /* Clear the SysTick count flag and set the count value back to + * zero. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Restart SysTick. */ + portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT; + + /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can + * set its parameter to 0 to indicate that its implementation contains + * its own wait for interrupt or wait for event instruction, and so wfi + * should not be executed again. However, the original expected idle + * time variable must remain unmodified, so a copy is taken. */ + xModifiableIdleTime = xExpectedIdleTime; + configPRE_SLEEP_PROCESSING( xModifiableIdleTime ); + + if( xModifiableIdleTime > 0 ) + { + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "wfi" ); + __asm volatile ( "isb" ); + } + + configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); + + /* Re-enable interrupts to allow the interrupt that brought the MCU + * out of sleep mode to execute immediately. See comments above + * the cpsid instruction above. */ + __asm volatile ( "cpsie i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable interrupts again because the clock is about to be stopped + * and interrupts that execute while the clock is stopped will increase + * any slippage between the time maintained by the RTOS and calendar + * time. */ + __asm volatile ( "cpsid i" ::: "memory" ); + __asm volatile ( "dsb" ); + __asm volatile ( "isb" ); + + /* Disable the SysTick clock without reading the + * portNVIC_SYSTICK_CTRL_REG register to ensure the + * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again, + * the time the SysTick is stopped for is accounted for as best it can + * be, but using the tickless mode will inevitably result in some tiny + * drift of the time maintained by the kernel with respect to calendar + * time*/ + portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT ); + + /* Determine whether the SysTick has already counted to zero. 
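configPRE_SLEEP_PROCESSING() and configPOST_SLEEP_PROCESSING() are application-supplied macros, and zeroing the pre-sleep parameter tells vPortSuppressTicksAndSleep() that the hook already performed its own wait for interrupt. A hedged FreeRTOSConfig.h sketch (vEnterApplicationStopMode() is a hypothetical BSP routine that sleeps until a wake event):

    /* FreeRTOSConfig.h fragment - tickless idle hooks. */
    #define configUSE_TICKLESS_IDLE    1

    #ifdef __ICCARM__ /* Keep C-only code away from the IAR assembler, as noted
                       * in secure_context_port_asm.s earlier in this patch. */
        void vEnterApplicationStopMode( void ); /* Hypothetical BSP sleep routine. */
    #endif

    /* The hook sleeps inside the BSP routine, so it sets the idle time to 0 to
     * make the port skip its own wfi. */
    #define configPRE_SLEEP_PROCESSING( xIdleTime ) \
        { vEnterApplicationStopMode(); ( xIdleTime ) = 0; }

    /* Nothing extra to do on wake in this sketch. */
    #define configPOST_SLEEP_PROCESSING( xIdleTime )    ( void ) ( xIdleTime )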
*/ + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + uint32_t ulCalculatedLoadValue; + + /* The tick interrupt ended the sleep (or is now pending), and + * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG + * with whatever remains of the new tick period. */ + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG ); + + /* Don't allow a tiny value, or values that have somehow + * underflowed because the post sleep hook did something + * that took too long or because the SysTick current-value register + * is zero. */ + if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) ) + { + ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ); + } + + portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue; + + /* As the pending tick will be processed as soon as this + * function exits, the tick value maintained by the tick is stepped + * forward by one less than the time spent waiting. */ + ulCompleteTickPeriods = xExpectedIdleTime - 1UL; + } + else + { + /* Something other than the tick interrupt ended the sleep. */ + + /* Use the SysTick current-value register to determine the + * number of SysTick decrements remaining until the expected idle + * time would have ended. */ + ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT ) + { + /* If the SysTick is not using the core clock, the current- + * value register might still be zero here. In that case, the + * SysTick didn't load from the reload register, and there are + * ulReloadValue decrements remaining in the expected idle + * time, not zero. */ + if( ulSysTickDecrementsLeft == 0 ) + { + ulSysTickDecrementsLeft = ulReloadValue; + } + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Work out how long the sleep lasted rounded to complete tick + * periods (not the ulReload value which accounted for part + * ticks). */ + ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft; + + /* How many complete tick periods passed while the processor + * was waiting? */ + ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick; + + /* The reload value is set to whatever fraction of a single tick + * period remains. */ + portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements; + } + + /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again, + * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If + * the SysTick is not using the core clock, temporarily configure it to + * use the core clock. This configuration forces the SysTick to load + * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next + * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready + * to receive the standard value immediately. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT ) + { + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + } + #else + { + /* The temporary usage of the core clock has served its purpose, + * as described above. Resume usage of the other clock. 
*/ + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT; + + if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 ) + { + /* The partial tick period already ended. Be sure the SysTick + * counts it only once. */ + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0; + } + + portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; + } + #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */ + + /* Step the tick to account for any tick periods that elapsed. */ + vTaskStepTick( ulCompleteTickPeriods ); + + /* Exit with interrupts enabled. */ + __asm volatile ( "cpsie i" ::: "memory" ); + } + } +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Calculate the constants required to configure the tick interrupt. */ + #if ( configUSE_TICKLESS_IDLE == 1 ) + { + ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ); + xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick; + ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ ); + } + #endif /* configUSE_TICKLESS_IDLE */ + + /* Stop and reset the SysTick. */ + portNVIC_SYSTICK_CTRL_REG = 0UL; + portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL; + + /* Configure SysTick to interrupt at the requested rate. */ + portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; + portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT; +} +/*-----------------------------------------------------------*/ + +static void prvTaskExitError( void ) +{ + volatile uint32_t ulDummy = 0UL; + + /* A function that implements a task must not exit or attempt to return to + * its caller as there is nothing to return to. If a task wants to exit it + * should instead call vTaskDelete( NULL ). Artificially force an assert() + * to be triggered if configASSERT() is defined, then stop here so + * application writers can catch the error. */ + configASSERT( ulCriticalNesting == ~0UL ); + portDISABLE_INTERRUPTS(); + + while( ulDummy == 0 ) + { + /* This file calls prvTaskExitError() after the scheduler has been + * started to remove a compiler warning about the function being + * defined but never called. ulDummy is used purely to quieten other + * warnings about code appearing after this function is called - making + * ulDummy volatile makes the compiler think the function could return + * and therefore not output an 'unreachable code' warning for code that + * appears after it. */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. 
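prvTaskExitError() only traps the mistake after the fact; as its comment says, a task that has finished its work should delete itself rather than return. A minimal sketch of the correct pattern:

    #include "FreeRTOS.h"
    #include "task.h"

    /* A run-to-completion task: do the work once, then delete this task so the
     * function never returns and prvTaskExitError() is never reached. */
    static void prvOneShotTask( void * pvParameters )
    {
        ( void ) pvParameters;

        /* ... perform the one-off work here ... */

        vTaskDelete( NULL );
    }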
*/ + extern uint32_t * __privileged_functions_start__; + extern uint32_t * __privileged_functions_end__; + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + extern uint32_t * __unprivileged_flash_start__; + extern uint32_t * __unprivileged_flash_end__; + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else /* if defined( __ARMCC_VERSION ) */ + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_functions_start__[]; + extern uint32_t __privileged_functions_end__[]; + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + extern uint32_t __unprivileged_flash_start__[]; + extern uint32_t __unprivileged_flash_end__[]; + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* The only permitted number of regions are 8 or 16. */ + configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) ); + + /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */ + configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ); + + /* Check that the MPU is present. */ + if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE ) + { + /* MAIR0 - Index 0. */ + portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + /* MAIR0 - Index 1. */ + portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* Setup privileged flash as Read Only so that privileged tasks can + * read it but not modify. */ + portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged flash as Read Only by both privileged and + * unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup unprivileged syscalls flash as Read Only by both privileged + * and unprivileged tasks. All tasks can read it but no-one can modify. */ + portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_ONLY ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Setup RAM containing kernel data for privileged access only. 
*/ + portMPU_RNR_REG = portPRIVILEGED_RAM_REGION; + portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_PRIVILEGED_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Enable mem fault. */ + portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT; + + /* Enable MPU with privileged background access i.e. unmapped + * regions have privileged access. */ + portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT ); + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_FPU == 1 ) + static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */ + { + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* Enable non-secure access to the FPU. */ + SecureInit_EnableNSFPUAccess(); + } + #endif /* configENABLE_TRUSTZONE */ + + /* CP10 = 11 ==> Full access to FPU i.e. both privileged and + * unprivileged code should be able to access FPU. CP11 should be + * programmed to the same value as CP10. */ + *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) | + ( portCPACR_CP11_VALUE << portCPACR_CP11_POS ) + ); + + /* ASPEN = 1 ==> Hardware should automatically preserve floating point + * context on exception entry and restore on exception return. + * LSPEN = 1 ==> Enable lazy context save of FP state. */ + *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK ); + } +#endif /* configENABLE_FPU */ +/*-----------------------------------------------------------*/ + +void vPortYield( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Set a PendSV to request a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + portDISABLE_INTERRUPTS(); + ulCriticalNesting++; + + /* Barriers are normally not required but do ensure the code is + * completely within the specified behaviour for the architecture. */ + __asm volatile ( "dsb" ::: "memory" ); + __asm volatile ( "isb" ); +} +/*-----------------------------------------------------------*/ + +void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */ +{ + configASSERT( ulCriticalNesting ); + ulCriticalNesting--; + + if( ulCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } +} +/*-----------------------------------------------------------*/ + +void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulPreviousMask; + + ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR(); + { + /* Increment the RTOS tick. */ + if( xTaskIncrementTick() != pdFALSE ) + { + /* Pend a context switch. */ + portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask ); +} +/*-----------------------------------------------------------*/ + +void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ +{ + #if ( configENABLE_MPU == 1 ) + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. 
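Application code normally reaches vPortEnterCritical()/vPortExitCritical() through the taskENTER_CRITICAL()/taskEXIT_CRITICAL() macros, and the ulCriticalNesting counter is what makes those calls safe to nest. A short usage sketch:

    #include <stdint.h>
    #include "FreeRTOS.h"
    #include "task.h"

    static volatile uint32_t ulSharedCounter = 0;

    void vIncrementSharedCounter( void )
    {
        /* Safe even if the caller is already in a critical section, because the
         * port only re-enables interrupts when the nesting count returns to 0. */
        taskENTER_CRITICAL();
        {
            ulSharedCounter++;
        }
        taskEXIT_CRITICAL();
    }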
*/ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + #endif /* configENABLE_MPU */ + + uint32_t ulPC; + + #if ( configENABLE_TRUSTZONE == 1 ) + uint32_t ulR0, ulR1; + extern TaskHandle_t pxCurrentTCB; + #if ( configENABLE_MPU == 1 ) + uint32_t ulControl, ulIsTaskPrivileged; + #endif /* configENABLE_MPU */ + #endif /* configENABLE_TRUSTZONE */ + uint8_t ucSVCNumber; + + /* Register are stored on the stack in the following order - R0, R1, R2, R3, + * R12, LR, PC, xPSR. */ + ulPC = pulCallerStackAddress[ 6 ]; + ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; + + switch( ucSVCNumber ) + { + #if ( configENABLE_TRUSTZONE == 1 ) + case portSVC_ALLOCATE_SECURE_CONTEXT: + + /* R0 contains the stack size passed as parameter to the + * vPortAllocateSecureContext function. */ + ulR0 = pulCallerStackAddress[ 0 ]; + + #if ( configENABLE_MPU == 1 ) + { + /* Read the CONTROL register value. */ + __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) ); + + /* The task that raised the SVC is privileged if Bit[0] + * in the CONTROL register is 0. */ + ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 ); + + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB ); + } + #else /* if ( configENABLE_MPU == 1 ) */ + { + /* Allocate and load a context for the secure task. */ + xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB ); + } + #endif /* configENABLE_MPU */ + + configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID ); + SecureContext_LoadContext( xSecureContext, pxCurrentTCB ); + break; + + case portSVC_FREE_SECURE_CONTEXT: + + /* R0 contains TCB being freed and R1 contains the secure + * context handle to be freed. */ + ulR0 = pulCallerStackAddress[ 0 ]; + ulR1 = pulCallerStackAddress[ 1 ]; + + /* Free the secure context. */ + SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 ); + break; + #endif /* configENABLE_TRUSTZONE */ + + case portSVC_START_SCHEDULER: + #if ( configENABLE_TRUSTZONE == 1 ) + { + /* De-prioritize the non-secure exceptions so that the + * non-secure pendSV runs at the lowest priority. */ + SecureInit_DePrioritizeNSExceptions(); + + /* Initialize the secure context management system. */ + SecureContext_Init(); + } + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_FPU == 1 ) + { + /* Setup the Floating Point Unit (FPU). */ + prvSetupFPU(); + } + #endif /* configENABLE_FPU */ + + /* Setup the context of the first task so that the first task starts + * executing. */ + vRestoreContextOfFirstTask(); + break; + + #if ( configENABLE_MPU == 1 ) + case portSVC_RAISE_PRIVILEGE: + + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* configENABLE_MPU */ + + default: + /* Incorrect SVC call. 
*/ + configASSERT( pdFALSE ); + } +} +/*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ +#if ( configENABLE_MPU == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ +#else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +#endif /* configENABLE_MPU */ +/* *INDENT-ON* */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #else /* portPRELOAD_REGISTERS */ + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + pxTopOfStack--; + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ + + #if ( configENABLE_MPU == 1 ) + { + pxTopOfStack--; + + if( xRunPrivileged == pdTRUE ) + { + *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. 
*/ + } + else + { + *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + } + } + #endif /* configENABLE_MPU */ + + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ + + #if ( configENABLE_TRUSTZONE == 1 ) + { + pxTopOfStack--; + *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */ + } + #endif /* configENABLE_TRUSTZONE */ + } + #endif /* portPRELOAD_REGISTERS */ + + return pxTopOfStack; +} +/*-----------------------------------------------------------*/ + +BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ + portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; + portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; + + #if ( configENABLE_MPU == 1 ) + { + /* Setup the Memory Protection Unit (MPU). */ + prvSetupMPU(); + } + #endif /* configENABLE_MPU */ + + /* Start the timer that generates the tick ISR. Interrupts are disabled + * here already. */ + vPortSetupTimerInterrupt(); + + /* Initialize the critical nesting count ready for the first task. */ + ulCriticalNesting = 0; + + /* Start the first task. */ + vStartFirstTask(); + + /* Should never get here as the tasks will now be executing. Call the task + * exit error function to prevent compiler warnings about a static function + * not being called in the case that the application writer overrides this + * functionality by defining configTASK_RETURN_ADDRESS. Call + * vTaskSwitchContext() so link time optimization does not remove the + * symbol. */ + vTaskSwitchContext(); + prvTaskExitError(); + + /* Should not get here. */ + return 0; +} +/*-----------------------------------------------------------*/ + +void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ +{ + /* Not implemented in ports where there is nothing to return to. + * Artificially force an assert. */ + configASSERT( ulCriticalNesting == 1000UL ); +} +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) + { + uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber; + int32_t lIndex = 0; + + #if defined( __ARMCC_VERSION ) + + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __privileged_sram_start__; + extern uint32_t * __privileged_sram_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __privileged_sram_start__[]; + extern uint32_t __privileged_sram_end__[]; + #endif /* defined( __ARMCC_VERSION ) */ + + /* Setup MAIR0. */ + xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK ); + xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK ); + + /* This function is called automatically when the task is created - in + * which case the stack region parameters will be valid. At all other + * times the stack parameters will not be valid and it is assumed that + * the stack region has already been configured. 
*/ + if( ulStackDepth > 0 ) + { + ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1; + + /* If the stack is within the privileged SRAM, do not protect it + * using a separate MPU region. This is needed because privileged + * SRAM is already protected using an MPU region and ARMv8-M does + * not allow overlapping MPU regions. */ + if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) && + ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) ) + { + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0; + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0; + } + else + { + /* Define the region that allows access to the stack. */ + ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ) | + ( portMPU_REGION_READ_WRITE ) | + ( portMPU_REGION_EXECUTE_NEVER ); + + xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_ATTR_INDEX0 ) | + ( portMPU_RLAR_REGION_ENABLE ); + } + } + + /* User supplied configurable regions. */ + for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ ) + { + /* If xRegions is NULL i.e. the task has not specified any MPU + * region, the else part ensures that all the configurable MPU + * regions are invalidated. */ + if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) ) + { + /* Translate the generic region definition contained in xRegions + * into the ARMv8 specific MPU settings that are then stored in + * xMPUSettings. */ + ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK; + ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1; + ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK; + + /* Start address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) | + ( portMPU_REGION_NON_SHAREABLE ); + + /* RO/RW. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY ); + } + else + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE ); + } + + /* XN. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 ) + { + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER ); + } + + /* End Address. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) | + ( portMPU_RLAR_REGION_ENABLE ); + + /* Normal memory/ Device memory. */ + if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 ) + { + /* Attr1 in MAIR0 is configured as device memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1; + } + else + { + /* Attr0 in MAIR0 is configured as normal memory. */ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0; + } + } + else + { + /* Invalidate the region. 
*/ + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL; + xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL; + } + + lIndex++; + } + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. Interrupt Program + * Status Register (IPSR) holds the exception number of the currently-executing + * exception or zero for Thread mode.*/ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.h new file mode 100644 index 00000000000..ecd86b97fd1 --- /dev/null +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.h @@ -0,0 +1,114 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef __PORT_ASM_H__ +#define __PORT_ASM_H__ + +/* Scheduler includes. */ +#include "FreeRTOS.h" + +/* MPU wrappers includes. */ +#include "mpu_wrappers.h" + +/** + * @brief Restore the context of the first task so that the first task starts + * executing. + */ +void vRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ +BaseType_t xIsPrivileged( void ) __attribute__( ( naked ) ); + +/** + * @brief Raises the privilege level by clearing the bit 0 of the CONTROL + * register. + * + * @note This is a privileged function and should only be called from the kenrel + * code. + * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vRaisePrivilege( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. 
+ * + * Bit 0 of the CONTROL register defines the privilege level of Thread Mode. + * Bit[0] = 0 --> The processor is running privileged + * Bit[0] = 1 --> The processor is running unprivileged. + */ +void vResetPrivilege( void ) __attribute__( ( naked ) ); + +/** + * @brief Starts the first task. + */ +void vStartFirstTask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Disables interrupts. + */ +uint32_t ulSetInterruptMask( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Enables interrupts. + */ +void vClearInterruptMask( uint32_t ulMask ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief PendSV Exception handler. + */ +void PendSV_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief SVC Handler. + */ +void SVC_Handler( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +/** + * @brief Allocate a Secure context for the calling task. + * + * @param[in] ulSecureStackSize The size of the stack to be allocated on the + * secure side for the calling task. + */ +void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) __attribute__( ( naked ) ); + +/** + * @brief Free the task's secure context. + * + * @param[in] pulTCB Pointer to the Task Control Block (TCB) of the task. + */ +void vPortFreeSecureContext( uint32_t * pulTCB ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION; + +#endif /* __PORT_ASM_H__ */ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s new file mode 100644 index 00000000000..581b84d4951 --- /dev/null +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s @@ -0,0 +1,262 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ +/* Including FreeRTOSConfig.h here will cause build errors if the header file +contains code not understood by the assembler - for example the 'extern' keyword. +To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so +the code is included in C files but excluded by the preprocessor in assembly +files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. 
*/ +#include "FreeRTOSConfig.h" + + EXTERN pxCurrentTCB + EXTERN vTaskSwitchContext + EXTERN vPortSVCHandler_C + + PUBLIC xIsPrivileged + PUBLIC vResetPrivilege + PUBLIC vRestoreContextOfFirstTask + PUBLIC vRaisePrivilege + PUBLIC vStartFirstTask + PUBLIC ulSetInterruptMask + PUBLIC vClearInterruptMask + PUBLIC PendSV_Handler + PUBLIC SVC_Handler +/*-----------------------------------------------------------*/ + +/*---------------- Unprivileged Functions -------------------*/ + +/*-----------------------------------------------------------*/ + + SECTION .text:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +xIsPrivileged: + mrs r0, control /* r0 = CONTROL. */ + tst r0, #1 /* Perform r0 & 1 (bitwise AND) and update the conditions flag. */ + ite ne + movne r0, #0 /* CONTROL[0]!=0. Return false to indicate that the processor is not privileged. */ + moveq r0, #1 /* CONTROL[0]==0. Return true to indicate that the processor is not privileged. */ + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vResetPrivilege: + mrs r0, control /* r0 = CONTROL. */ + orr r0, r0, #1 /* r0 = r0 | 1. */ + msr control, r0 /* CONTROL = r0. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +/*----------------- Privileged Functions --------------------*/ + +/*-----------------------------------------------------------*/ + + SECTION privileged_functions:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +vRestoreContextOfFirstTask: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r2] /* Read pxCurrentTCB. */ + ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ + +#if ( configENABLE_MPU == 1 ) + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r2] /* Disable MPU. */ + + adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r3, [r2] /* Program MAIR0. */ + ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ + movs r3, #4 /* r3 = 4. */ + str r3, [r2] /* Program RNR = 4. */ + adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r2] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ + msr psplim, r1 /* Set this task's PSPLIM value. */ + msr control, r2 /* Set this task's CONTROL value. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. 
*/ + bx r3 /* Finally, branch to EXC_RETURN. */ +#else /* configENABLE_MPU */ + ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + msr psplim, r1 /* Set this task's PSPLIM value. */ + movs r1, #2 /* r1 = 2. */ + msr CONTROL, r1 /* Switch to use PSP in the thread mode. */ + adds r0, #32 /* Discard everything up to r0. */ + msr psp, r0 /* This is now the new top of stack to use in the task. */ + isb + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx r2 /* Finally, branch to EXC_RETURN. */ +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +vRaisePrivilege: + mrs r0, control /* Read the CONTROL register. */ + bic r0, r0, #1 /* Clear the bit 0. */ + msr control, r0 /* Write back the new CONTROL value. */ + bx lr /* Return to the caller. */ +/*-----------------------------------------------------------*/ + +vStartFirstTask: + ldr r0, =0xe000ed08 /* Use the NVIC offset register to locate the stack. */ + ldr r0, [r0] /* Read the VTOR register which gives the address of vector table. */ + ldr r0, [r0] /* The first entry in vector table is stack pointer. */ + msr msp, r0 /* Set the MSP back to the start of the stack. */ + cpsie i /* Globally enable interrupts. */ + cpsie f + dsb + isb + svc 2 /* System call to start the first task. portSVC_START_SCHEDULER = 2. */ +/*-----------------------------------------------------------*/ + +ulSetInterruptMask: + mrs r0, basepri /* r0 = basepri. Return original basepri value. */ + mov r1, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r1 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +vClearInterruptMask: + msr basepri, r0 /* basepri = ulMask. */ + dsb + isb + bx lr /* Return. */ +/*-----------------------------------------------------------*/ + +PendSV_Handler: + mrs r0, psp /* Read PSP in r0. */ +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + it eq + vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ +#endif /* configENABLE_FPU || configENABLE_MVE */ +#if ( configENABLE_MPU == 1 ) + mrs r1, psplim /* r1 = PSPLIM. */ + mrs r2, control /* r2 = CONTROL. */ + mov r3, lr /* r3 = LR/EXC_RETURN. */ + stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ +#else /* configENABLE_MPU */ + mrs r2, psplim /* r2 = PSPLIM. */ + mov r3, lr /* r3 = LR/EXC_RETURN. */ + stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */ +#endif /* configENABLE_MPU */ + + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r2] /* Read pxCurrentTCB. */ + str r0, [r1] /* Save the new top of stack in TCB. */ + + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r2] /* Read pxCurrentTCB. */ + ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. 
*/ + +#if ( configENABLE_MPU == 1 ) + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ + str r4, [r2] /* Disable MPU. */ + + adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ + ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r3, [r2] /* Program MAIR0. */ + ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ + movs r3, #4 /* r3 = 4. */ + str r3, [r2] /* Program RNR = 4. */ + adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r4, [r2] /* Read the value of MPU_CTRL. */ + orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ + str r4, [r2] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ +#else /* configENABLE_MPU */ + ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ +#endif /* configENABLE_MPU */ + +#if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ + it eq + vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ +#endif /* configENABLE_FPU || configENABLE_MVE */ + + #if ( configENABLE_MPU == 1 ) + msr psplim, r1 /* Restore the PSPLIM register value for the task. */ + msr control, r2 /* Restore the CONTROL register value for the task. */ +#else /* configENABLE_MPU */ + msr psplim, r2 /* Restore the PSPLIM register value for the task. */ +#endif /* configENABLE_MPU */ + msr psp, r0 /* Remember the new top of stack for the task. */ + bx r3 +/*-----------------------------------------------------------*/ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C +/*-----------------------------------------------------------*/ + + END diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h new file mode 100644 index 00000000000..a0efc1f9dcf --- /dev/null +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h @@ -0,0 +1,78 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACRO_H +#define PORTMACRO_H + +#ifdef __cplusplus + extern "C" { +#endif + +#include "portmacrocommon.h" + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + +/** + * Architecture specifics. + */ +#define portARCH_NAME "Cortex-M35P" +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +#if( configTOTAL_MPU_REGIONS == 16 ) + #error 16 MPU regions are not yet supported for this port. +#endif +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ +#define portDISABLE_INTERRUPTS() ulSetInterruptMask() +#define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) +/*-----------------------------------------------------------*/ + +/* Suppress warnings that are generated by the IAR tools, but cannot be fixed in + * the source code because to do so would cause other compilers to generate + * warnings. */ +#pragma diag_suppress=Be006 +#pragma diag_suppress=Pa082 +/*-----------------------------------------------------------*/ + +#ifdef __cplusplus + } +#endif + +#endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h new file mode 100644 index 00000000000..ca7e9225c05 --- /dev/null +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h @@ -0,0 +1,313 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef PORTMACROCOMMON_H + #define PORTMACROCOMMON_H + + #ifdef __cplusplus + extern "C" { + #endif + +/*------------------------------------------------------------------------------ + * Port specific definitions. + * + * The settings in this file configure FreeRTOS correctly for the given hardware + * and compiler. + * + * These settings should not be altered. + *------------------------------------------------------------------------------ + */ + + #ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. + #endif /* configENABLE_FPU */ + + #ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. + #endif /* configENABLE_MPU */ + + #ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. + #endif /* configENABLE_TRUSTZONE */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Type definitions. + */ + #define portCHAR char + #define portFLOAT float + #define portDOUBLE double + #define portLONG long + #define portSHORT short + #define portSTACK_TYPE uint32_t + #define portBASE_TYPE long + + typedef portSTACK_TYPE StackType_t; + typedef long BaseType_t; + typedef unsigned long UBaseType_t; + + #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff + #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL + +/* 32-bit tick type on a 32-bit architecture, so reads of the tick count do + * not need to be guarded with a critical section. */ + #define portTICK_TYPE_IS_ATOMIC 1 + #else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. + #endif +/*-----------------------------------------------------------*/ + +/** + * Architecture specifics. + */ + #define portSTACK_GROWTH ( -1 ) + #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) + #define portBYTE_ALIGNMENT 8 + #define portNOP() + #define portINLINE __inline + #ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) + #endif + #define portHAS_STACK_OVERFLOW_CHECKING 1 +/*-----------------------------------------------------------*/ + +/** + * @brief Extern declarations. 
+ */ + extern BaseType_t xPortIsInsideInterrupt( void ); + + extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; + + extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; + extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; + + extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; + + #if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; + #endif /* configENABLE_TRUSTZONE */ + + #if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief MPU specific constants. + */ + #if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) + #else + #define portPRIVILEGE_BIT ( 0x0UL ) + #endif /* configENABLE_MPU */ + +/* MPU settings that can be overriden in FreeRTOSConfig.h. */ +#ifndef configTOTAL_MPU_REGIONS + /* Define to 8 for backward compatibility. */ + #define configTOTAL_MPU_REGIONS ( 8UL ) +#endif + +/* MPU regions. */ + #define portPRIVILEGED_FLASH_REGION ( 0UL ) + #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) + #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) + #define portPRIVILEGED_RAM_REGION ( 3UL ) + #define portSTACK_REGION ( 4UL ) + #define portFIRST_CONFIGURABLE_REGION ( 5UL ) + #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) + #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) + #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ + +/* Device memory attributes used in MPU_MAIR registers. + * + * 8-bit values encoded as follows: + * Bit[7:4] - 0000 - Device Memory + * Bit[3:2] - 00 --> Device-nGnRnE + * 01 --> Device-nGnRE + * 10 --> Device-nGRE + * 11 --> Device-GRE + * Bit[1:0] - 00, Reserved. + */ + #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ + #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ + #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ + #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ + +/* Normal memory attributes used in MPU_MAIR registers. */ + #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ + #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ + +/* Attributes used in MPU_RBAR registers. */ + #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) + #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) + #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) + + #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) + #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) + #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) + #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) + + #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +/*-----------------------------------------------------------*/ + +/** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + +/** + * @brief MPU settings as stored in the TCB. + */ + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + } xMPU_SETTINGS; +/*-----------------------------------------------------------*/ + +/** + * @brief SVC numbers. + */ + #define portSVC_ALLOCATE_SECURE_CONTEXT 0 + #define portSVC_FREE_SECURE_CONTEXT 1 + #define portSVC_START_SCHEDULER 2 + #define portSVC_RAISE_PRIVILEGE 3 +/*-----------------------------------------------------------*/ + +/** + * @brief Scheduler utilities. + */ + #define portYIELD() vPortYield() + #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) + #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) + #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) + #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +/*-----------------------------------------------------------*/ + +/** + * @brief Critical section management. + */ + #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() + #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) + #define portENTER_CRITICAL() vPortEnterCritical() + #define portEXIT_CRITICAL() vPortExitCritical() +/*-----------------------------------------------------------*/ + +/** + * @brief Tickless idle/low power functionality. + */ + #ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) + #endif +/*-----------------------------------------------------------*/ + +/** + * @brief Task function macros as described on the FreeRTOS.org WEB site. + */ + #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) + #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/*-----------------------------------------------------------*/ + + #if ( configENABLE_TRUSTZONE == 1 ) + +/** + * @brief Allocate a secure context for the task. + * + * Tasks are not created with a secure context. Any task that is going to call + * secure functions must call portALLOCATE_SECURE_CONTEXT() to allocate itself a + * secure context before it calls any secure function. + * + * @param[in] ulSecureStackSize The size of the secure stack to be allocated. + */ + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + +/** + * @brief Called when a task is deleted to delete the task's secure context, + * if it has one. + * + * @param[in] pxTCB The TCB of the task being deleted. + */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) + #endif /* configENABLE_TRUSTZONE */ +/*-----------------------------------------------------------*/ + + #if ( configENABLE_MPU == 1 ) + +/** + * @brief Checks whether or not the processor is privileged. + * + * @return 1 if the processor is already privileged, 0 otherwise. + */ + #define portIS_PRIVILEGED() xIsPrivileged() + +/** + * @brief Raise an SVC request to raise privilege. + * + * The SVC handler checks that the SVC was raised from a system call and only + * then it raises the privilege. 
If this is called from any other place, + * the privilege is not raised. + */ + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + +/** + * @brief Lowers the privilege level by setting the bit 0 of the CONTROL + * register. + */ + #define portRESET_PRIVILEGE() vResetPrivilege() + #else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() + #endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + +/** + * @brief Barriers. + */ + #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +/*-----------------------------------------------------------*/ + + #ifdef __cplusplus + } + #endif + +#endif /* PORTMACROCOMMON_H */ From 0a70ecbb510ac91bdeaa1488b76ba8924efef283 Mon Sep 17 00:00:00 2001 From: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com> Date: Tue, 28 Feb 2023 13:22:25 +0530 Subject: [PATCH 15/62] Introduced Github Status Badge for Unit Tests (#634) * Introduced Github Status Badge for Unit Tests * Github status badge to point to latest run * Github status badge UT points to latest results * Fixed URL for Github Status badge --------- Co-authored-by: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 025a135774f..90c3c698d15 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ +[![CMock Unit Tests](https://github.com/FreeRTOS/FreeRTOS-Kernel/actions/workflows/unit-tests.yml/badge.svg?branch=main&event=push)](https://github.com/FreeRTOS/FreeRTOS-Kernel/actions/workflows/unit-tests.yml?query=branch%3Amain+event%3Apush+workflow%3A%22CMock+Unit+Tests%22++) + ## Getting started This repository contains FreeRTOS kernel source/header files and kernel ports only. This repository is referenced as a submodule in [FreeRTOS/FreeRTOS](https://github.com/FreeRTOS/FreeRTOS) repository, which contains pre-configured demo application projects under ```FreeRTOS/Demo``` directory. From e6514fb1edacbf2535aa8e5d16b7ae06a18f57e3 Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Thu, 2 Mar 2023 14:47:29 +0530 Subject: [PATCH 16/62] Remove C99 requirement from CMake file (#633) * Remove C99 requirement from CMake file The kernel source is C89 compliant and does not need C99. Signed-off-by: Gaurav Aggarwal * Explicitly set C89 requirement for kernel Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal --- CMakeLists.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index aead9a0d841..b51b7aaacdd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -230,8 +230,7 @@ endif() ######################################################################## # Requirements -set(CMAKE_C_STANDARD 99) -set(CMAKE_C_STANDARD_REQUIRED ON) +set_property(TARGET freertos_kernel PROPERTY C_STANDARD 90) ######################################################################## # Overall Compile Options From c3e1df031e3f988780afb2a743fde93c12135005 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Thu, 2 Mar 2023 08:26:04 -0800 Subject: [PATCH 17/62] Add Thread Local Storage (TLS) support using Picolibc functions (#343) * Pass top of stack to configINIT_TLS_BLOCK Picolibc wants to allocate the per-task TLS block within the stack segment, so it will need to modify the top of stack value. 
Pass the pxTopOfStack variable to make this explicit. Signed-off-by: Keith Packard * Move newlib-specific definitions to separate file This reduces the clutter in FreeRTOS.h caused by having newlib-specific macros present there. Signed-off-by: Keith Packard * Make TLS code depend only on configUSE_C_RUNTIME_TLS_SUPPORT Remove reference to configUSE_NEWLIB_REENTRANT as that only works when using newlib. configUSE_C_RUNTIME_TLS_SUPPORT is always set when configUSE_NEWLIB_REENTRANT is set, so using both was redundant in that case. Signed-off-by: Keith Packard * portable-ARC: Adapt ARC support to use generalized TLS support With generalized thread local storage (TLS) support present in the core, the two ARC ports need to have the changes to the TCB mirrored to them. Signed-off-by: Keith Packard * Add Thread Local Storage (TLS) support using Picolibc functions This patch provides definitions of the general TLS support macros in terms of the Picolibc TLS support functions. Picolibc is normally configured to use TLS internally for all variables that are intended to be task-local, so these changes are necessary for picolibc to work correctly with FreeRTOS. The picolibc helper functions rely on elements within the linker script to arrange the TLS data in memory and define some symbols. Applications wanting to use this mechanism will need changes in their linker script when migrating to picolibc. Signed-off-by: Keith Packard --------- Signed-off-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --- .github/lexicon.txt | 2 + include/FreeRTOS.h | 39 +++++--------- include/newlib-freertos.h | 62 +++++++++++++++++++++ include/picolibc-freertos.h | 69 ++++++++++++++++++++++++ portable/ThirdParty/GCC/ARC_EM_HS/port.c | 12 +---- portable/ThirdParty/GCC/ARC_v1/port.c | 12 +---- tasks.c | 12 ++--- 7 files changed, 155 insertions(+), 53 deletions(-) create mode 100644 include/newlib-freertos.h create mode 100644 include/picolibc-freertos.h diff --git a/.github/lexicon.txt b/.github/lexicon.txt index 47ce4ccde11..47d2349bfff 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -1054,6 +1054,7 @@ mclk mconfigintcoresw mcr mcu +md mddr mder mdh @@ -1335,6 +1336,7 @@ phy phya pic picnt +picolibc pien piir pimr diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index e720480dc64..f1ab32c6c1b 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -95,41 +95,26 @@ /* Required if struct _reent is used. */ #if ( configUSE_NEWLIB_REENTRANT == 1 ) -/* Note Newlib support has been included by popular demand, but is not - * used by the FreeRTOS maintainers themselves. FreeRTOS is not - * responsible for resulting newlib operation. User must be familiar with - * newlib and must provide system-wide implementations of the necessary - * stubs. Be warned that (at the time of writing) the current newlib design - * implements a system-wide malloc() that must be provided with locks. - * - * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html - * for additional information. */ - #include + #include "newlib-freertos.h" - #define configUSE_C_RUNTIME_TLS_SUPPORT 1 +#endif /* if ( configUSE_NEWLIB_REENTRANT == 1 ) */ - #ifndef configTLS_BLOCK_TYPE - #define configTLS_BLOCK_TYPE struct _reent - #endif +/* Must be defaulted before configUSE_PICOLIBC_TLS is used below. 
*/ +#ifndef configUSE_PICOLIBC_TLS + #define configUSE_PICOLIBC_TLS 0 +#endif - #ifndef configINIT_TLS_BLOCK - #define configINIT_TLS_BLOCK( xTLSBlock ) _REENT_INIT_PTR( &( xTLSBlock ) ) - #endif +#if ( configUSE_PICOLIBC_TLS == 1 ) - #ifndef configSET_TLS_BLOCK - #define configSET_TLS_BLOCK( xTLSBlock ) ( _impure_ptr = &( xTLSBlock ) ) - #endif + #include "picolibc-freertos.h" - #ifndef configDEINIT_TLS_BLOCK - #define configDEINIT_TLS_BLOCK( xTLSBlock ) _reclaim_reent( &( xTLSBlock ) ) - #endif -#endif /* if ( configUSE_NEWLIB_REENTRANT == 1 ) */ +#endif /* if ( configUSE_PICOLIBC_TLS == 1 ) */ #ifndef configUSE_C_RUNTIME_TLS_SUPPORT #define configUSE_C_RUNTIME_TLS_SUPPORT 0 #endif -#if ( ( configUSE_NEWLIB_REENTRANT == 0 ) && ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) +#if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) #ifndef configTLS_BLOCK_TYPE #error Missing definition: configTLS_BLOCK_TYPE must be defined in FreeRTOSConfig.h when configUSE_C_RUNTIME_TLS_SUPPORT is set to 1. @@ -146,7 +131,7 @@ #ifndef configDEINIT_TLS_BLOCK #error Missing definition: configDEINIT_TLS_BLOCK must be defined in FreeRTOSConfig.h when configUSE_C_RUNTIME_TLS_SUPPORT is set to 1. #endif -#endif /* if ( ( configUSE_NEWLIB_REENTRANT == 0 ) && ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) */ +#endif /* if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) */ /* * Check all the required application specific macros have been defined. @@ -1306,7 +1291,7 @@ typedef struct xSTATIC_TCB #if ( configGENERATE_RUN_TIME_STATS == 1 ) configRUN_TIME_COUNTER_TYPE ulDummy16; #endif - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) configTLS_BLOCK_TYPE xDummy17; #endif #if ( configUSE_TASK_NOTIFICATIONS == 1 ) diff --git a/include/newlib-freertos.h b/include/newlib-freertos.h new file mode 100644 index 00000000000..497ca529990 --- /dev/null +++ b/include/newlib-freertos.h @@ -0,0 +1,62 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef INC_NEWLIB_FREERTOS_H +#define INC_NEWLIB_FREERTOS_H + +/* Note Newlib support has been included by popular demand, but is not + * used by the FreeRTOS maintainers themselves. FreeRTOS is not + * responsible for resulting newlib operation. 
User must be familiar with + * newlib and must provide system-wide implementations of the necessary + * stubs. Be warned that (at the time of writing) the current newlib design + * implements a system-wide malloc() that must be provided with locks. + * + * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + * for additional information. */ + +#include + +#define configUSE_C_RUNTIME_TLS_SUPPORT 1 + +#ifndef configTLS_BLOCK_TYPE + #define configTLS_BLOCK_TYPE struct _reent +#endif + +#ifndef configINIT_TLS_BLOCK + #define configINIT_TLS_BLOCK( xTLSBlock, pxTopOfStack ) _REENT_INIT_PTR( &( xTLSBlock ) ) +#endif + +#ifndef configSET_TLS_BLOCK + #define configSET_TLS_BLOCK( xTLSBlock ) _impure_ptr = &( xTLSBlock ) +#endif + +#ifndef configDEINIT_TLS_BLOCK + #define configDEINIT_TLS_BLOCK( xTLSBlock ) _reclaim_reent( &( xTLSBlock ) ) +#endif + +#endif /* INC_NEWLIB_FREERTOS_H */ diff --git a/include/picolibc-freertos.h b/include/picolibc-freertos.h new file mode 100644 index 00000000000..472d71e9932 --- /dev/null +++ b/include/picolibc-freertos.h @@ -0,0 +1,69 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef INC_PICOLIBC_FREERTOS_H +#define INC_PICOLIBC_FREERTOS_H + +/* Use picolibc TLS support to allocate space for __thread variables, + * initialize them at thread creation and set the TLS context at + * thread switch time. + * + * See the picolibc TLS docs: + * https://github.com/picolibc/picolibc/blob/main/doc/tls.md + * for additional information. */ + +#include + +#define configUSE_C_RUNTIME_TLS_SUPPORT 1 + +#define configTLS_BLOCK_TYPE void * + +/* Allocate thread local storage block off the end of the +* stack. 
The _tls_size() function returns the size (in +* bytes) of the total TLS area used by the application */ +#if ( portSTACK_GROWTH < 0 ) + #define configINIT_TLS_BLOCK( xTLSBlock, pxTopOfStack ) \ + do { \ + pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) - _tls_size() ); \ + xTLSBlock = pxTopOfStack; \ + _init_tls( xTLSBlock ); \ + } while( 0 ) +#else /* portSTACK_GROWTH */ + #define configINIT_TLS_BLOCK( xTLSBlock, pxTopOfStack ) \ + do { \ + xTLSBlock = pxTopOfStack; \ + pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) + _tls_size() ); \ + _init_tls( xTLSBlock ); \ + } while( 0 ) +#endif /* portSTACK_GROWTH */ + +#define configSET_TLS_BLOCK( xTLSBlock ) _set_tls( xTLSBlock ) + +#define configDEINIT_TLS_BLOCK( xTLSBlock ) + +#endif /* INC_PICOLIBC_FREERTOS_H */ diff --git a/portable/ThirdParty/GCC/ARC_EM_HS/port.c b/portable/ThirdParty/GCC/ARC_EM_HS/port.c index 57be56a7f5f..0e023088efd 100644 --- a/portable/ThirdParty/GCC/ARC_EM_HS/port.c +++ b/portable/ThirdParty/GCC/ARC_EM_HS/port.c @@ -251,16 +251,8 @@ void vPortEndTask( void ) uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */ #endif - #if ( configUSE_NEWLIB_REENTRANT == 1 ) - - /* Allocate a Newlib reent structure that is specific to this task. - * Note Newlib support has been included by popular demand, but is not - * used by the FreeRTOS maintainers themselves. FreeRTOS is not - * responsible for resulting newlib operation. User must be familiar with - * newlib and must provide system-wide implementations of the necessary - * stubs. Be warned that (at the time of writing) the current newlib design - * implements a system-wide malloc() that must be provided with locks. */ - struct _reent xNewLib_reent; + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) + configTLS_BLOCK_TYPE xTLSBlock; /*< Memory block used as Thread Local Storage (TLS) Block for the task. */ #endif #if ( configUSE_TASK_NOTIFICATIONS == 1 ) diff --git a/portable/ThirdParty/GCC/ARC_v1/port.c b/portable/ThirdParty/GCC/ARC_v1/port.c index d3de6fa3d60..4dbdc7e601c 100644 --- a/portable/ThirdParty/GCC/ARC_v1/port.c +++ b/portable/ThirdParty/GCC/ARC_v1/port.c @@ -251,16 +251,8 @@ void vPortEndTask( void ) uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */ #endif - #if ( configUSE_NEWLIB_REENTRANT == 1 ) - - /* Allocate a Newlib reent structure that is specific to this task. - * Note Newlib support has been included by popular demand, but is not - * used by the FreeRTOS maintainers themselves. FreeRTOS is not - * responsible for resulting newlib operation. User must be familiar with - * newlib and must provide system-wide implementations of the necessary - * stubs. Be warned that (at the time of writing) the current newlib design - * implements a system-wide malloc() that must be provided with locks. */ - struct _reent xNewLib_reent; + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) + configTLS_BLOCK_TYPE xTLSBlock; /*< Memory block used as Thread Local Storage (TLS) Block for the task. */ #endif #if ( configUSE_TASK_NOTIFICATIONS == 1 ) diff --git a/tasks.c b/tasks.c index 2b73b1b9703..0e7a56c6058 100644 --- a/tasks.c +++ b/tasks.c @@ -300,7 +300,7 @@ typedef struct tskTaskControlBlock /* The old naming convention is used to configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /**< Stores the amount of time the task has spent in the Running state. 
*/ #endif - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) configTLS_BLOCK_TYPE xTLSBlock; /**< Memory block used as Thread Local Storage (TLS) Block for the task. */ #endif @@ -955,10 +955,10 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, } #endif - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) { /* Allocate and initialize memory for the task's TLS Block. */ - configINIT_TLS_BLOCK( pxNewTCB->xTLSBlock ); + configINIT_TLS_BLOCK( pxNewTCB->xTLSBlock, pxTopOfStack ); } #endif @@ -2030,7 +2030,7 @@ void vTaskStartScheduler( void ) * starts to run. */ portDISABLE_INTERRUPTS(); - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) { /* Switch C-Runtime's TLS Block to point to the TLS * block specific to the task that will run first. */ @@ -3074,7 +3074,7 @@ void vTaskSwitchContext( void ) } #endif - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) { /* Switch C-Runtime's TLS Block to point to the TLS * Block specific to this task. */ @@ -3950,7 +3950,7 @@ static void prvCheckTasksWaitingTermination( void ) * want to allocate and clean RAM statically. */ portCLEAN_UP_TCB( pxTCB ); - #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) || ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) ) + #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 ) { /* Free up the memory allocated for the task's TLS Block. */ configDEINIT_TLS_BLOCK( pxCurrentTCB->xTLSBlock ); From a9e1f668497b9832edd4f1a85a1d37f4e8409cf9 Mon Sep 17 00:00:00 2001 From: Chris Copeland Date: Thu, 2 Mar 2023 09:49:56 -0800 Subject: [PATCH 18/62] Interrupt priority assert improvements for CM3/4/7 (#602) * Interrupt priority assert improvements for CM3/4/7 In the ARM_CM3, ARM_CM4, and ARM_CM7 ports, change the assertion that `configMAX_SYSCALL_INTERRUPT_PRIORITY` is nonzero to account for the number of priority bits implemented by the hardware. Change these ports to also use the lowest priority for PendSV and SysTick, ignoring `configKERNEL_INTERRUPT_PRIORITY`. 
* Remove not needed configKERNEL_INTERRUPT_PRIORITY define Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal --- portable/CCS/ARM_CM3/port.c | 13 +++++++++++-- portable/CCS/ARM_CM4F/port.c | 13 +++++++++++-- portable/GCC/ARM_CM3/port.c | 24 +++++++++++------------- portable/GCC/ARM_CM3_MPU/port.c | 17 +++++++++++------ portable/GCC/ARM_CM4F/port.c | 17 +++++++++++------ portable/GCC/ARM_CM4_MPU/port.c | 17 +++++++++++------ portable/GCC/ARM_CM7/r0p1/port.c | 17 +++++++++++------ portable/IAR/ARM_CM0/port.c | 7 ------- portable/IAR/ARM_CM3/port.c | 24 +++++++++++------------- portable/IAR/ARM_CM4F/port.c | 17 +++++++++++------ portable/IAR/ARM_CM4F_MPU/port.c | 17 +++++++++++------ portable/IAR/ARM_CM7/r0p1/port.c | 17 +++++++++++------ portable/MikroC/ARM_CM4F/port.c | 21 +++++++++++---------- portable/RVDS/ARM_CM3/port.c | 21 +++++++++++---------- portable/RVDS/ARM_CM4F/port.c | 17 +++++++++++------ portable/RVDS/ARM_CM4_MPU/port.c | 13 +++++++++++-- portable/RVDS/ARM_CM7/r0p1/port.c | 17 +++++++++++------ portable/Tasking/ARM_CM4F/port.c | 7 ++++--- 18 files changed, 180 insertions(+), 116 deletions(-) diff --git a/portable/CCS/ARM_CM3/port.c b/portable/CCS/ARM_CM3/port.c index 80e0b0a2410..dfdb240ed2b 100644 --- a/portable/CCS/ARM_CM3/port.c +++ b/portable/CCS/ARM_CM3/port.c @@ -52,8 +52,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -239,6 +240,14 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. 
*/ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/CCS/ARM_CM4F/port.c b/portable/CCS/ARM_CM4F/port.c index 4006191705e..360e83d7000 100644 --- a/portable/CCS/ARM_CM4F/port.c +++ b/portable/CCS/ARM_CM4F/port.c @@ -56,8 +56,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -258,6 +259,14 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/GCC/ARM_CM3/port.c b/portable/GCC/ARM_CM3/port.c index 7f650fd360d..a5cea1e1e1b 100644 --- a/portable/GCC/ARM_CM3/port.c +++ b/portable/GCC/ARM_CM3/port.c @@ -34,13 +34,6 @@ #include "FreeRTOS.h" #include "task.h" -/* For backward compatibility, ensure configKERNEL_INTERRUPT_PRIORITY is - * defined. The value should also ensure backward compatibility. - * FreeRTOS.org versions prior to V4.4.0 did not include this definition. */ -#ifndef configKERNEL_INTERRUPT_PRIORITY - #define configKERNEL_INTERRUPT_PRIORITY 255 -#endif - /* Constants required to manipulate the core. Registers first... */ #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) ) #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) ) @@ -55,8 +48,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -265,10 +259,6 @@ static void prvPortStartFirstTask( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. - * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; @@ -293,6 +283,14 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. 
*/ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c index e0f1d170154..5f7d2c81113 100644 --- a/portable/GCC/ARM_CM3_MPU/port.c +++ b/portable/GCC/ARM_CM3_MPU/port.c @@ -83,8 +83,9 @@ /* Constants required to access and manipulate the SysTick. */ #define portNVIC_SYSTICK_INT ( 0x00000002UL ) #define portNVIC_SYSTICK_ENABLE ( 0x00000001UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) #define portNVIC_SVC_PRI ( ( ( uint32_t ) configMAX_SYSCALL_INTERRUPT_PRIORITY - 1UL ) << 24UL ) /* Constants required to set up the initial stack. */ @@ -381,10 +382,6 @@ static void prvRestoreContextOfFirstTask( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See - * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); - #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; @@ -409,6 +406,14 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/GCC/ARM_CM4F/port.c b/portable/GCC/ARM_CM4F/port.c index b946e6e9435..b978f0af54c 100644 --- a/portable/GCC/ARM_CM4F/port.c +++ b/portable/GCC/ARM_CM4F/port.c @@ -58,8 +58,9 @@ #define portCORTEX_M7_r0p1_ID ( 0x410FC271UL ) #define portCORTEX_M7_r0p0_ID ( 0x410FC270UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. 
*/ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -295,10 +296,6 @@ static void prvPortStartFirstTask( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. - * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - /* This port can be used on all revisions of the Cortex-M7 core other than * the r0p1 parts. r0p1 parts should use the port from the * /source/portable/GCC/ARM_CM7/r0p1 directory. */ @@ -329,6 +326,14 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c index 3125f7824d4..57d0ea5a2f6 100644 --- a/portable/GCC/ARM_CM4_MPU/port.c +++ b/portable/GCC/ARM_CM4_MPU/port.c @@ -93,8 +93,9 @@ /* Constants required to access and manipulate the SysTick. */ #define portNVIC_SYSTICK_INT ( 0x00000002UL ) #define portNVIC_SYSTICK_ENABLE ( 0x00000001UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) #define portNVIC_SVC_PRI ( ( ( uint32_t ) configMAX_SYSCALL_INTERRUPT_PRIORITY - 1UL ) << 24UL ) /* Constants required to manipulate the VFP. */ @@ -412,10 +413,6 @@ static void prvRestoreContextOfFirstTask( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See - * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); - /* Errata 837070 workaround must only be enabled on Cortex-M7 r0p0 * and r0p1 cores. */ #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) @@ -452,6 +449,14 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. 
*/ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/GCC/ARM_CM7/r0p1/port.c b/portable/GCC/ARM_CM7/r0p1/port.c index a9c69aae7fa..c602bd297cd 100644 --- a/portable/GCC/ARM_CM7/r0p1/port.c +++ b/portable/GCC/ARM_CM7/r0p1/port.c @@ -52,8 +52,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -289,10 +290,6 @@ static void prvPortStartFirstTask( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. - * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; @@ -317,6 +314,14 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/IAR/ARM_CM0/port.c b/portable/IAR/ARM_CM0/port.c index ad168e577ae..c55af1fd8ad 100644 --- a/portable/IAR/ARM_CM0/port.c +++ b/portable/IAR/ARM_CM0/port.c @@ -56,13 +56,6 @@ /* Constants required to set up the initial stack. */ #define portINITIAL_XPSR ( 0x01000000 ) -/* For backward compatibility, ensure configKERNEL_INTERRUPT_PRIORITY is - * defined. The value 255 should also ensure backward compatibility. - * FreeRTOS.org versions prior to V4.3.0 did not include this definition. */ -#ifndef configKERNEL_INTERRUPT_PRIORITY - #define configKERNEL_INTERRUPT_PRIORITY 0 -#endif - /* Each task maintains its own interrupt status in the critical nesting * variable. */ static UBaseType_t uxCriticalNesting = 0xaaaaaaaa; diff --git a/portable/IAR/ARM_CM3/port.c b/portable/IAR/ARM_CM3/port.c index cef6c247216..f35ee98c798 100644 --- a/portable/IAR/ARM_CM3/port.c +++ b/portable/IAR/ARM_CM3/port.c @@ -55,8 +55,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. 
*/ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -86,13 +87,6 @@ * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) -/* For backward compatibility, ensure configKERNEL_INTERRUPT_PRIORITY is - * defined. The value 255 should also ensure backward compatibility. - * FreeRTOS.org versions prior to V4.3.0 did not include this definition. */ -#ifndef configKERNEL_INTERRUPT_PRIORITY - #define configKERNEL_INTERRUPT_PRIORITY 255 -#endif - /* Let the user override the default SysTick clock rate. If defined by the * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the * configuration register. */ @@ -214,10 +208,6 @@ static void prvTaskExitError( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. - * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; @@ -242,6 +232,14 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/IAR/ARM_CM4F/port.c b/portable/IAR/ARM_CM4F/port.c index 243d0578585..90be11d0eda 100644 --- a/portable/IAR/ARM_CM4F/port.c +++ b/portable/IAR/ARM_CM4F/port.c @@ -65,8 +65,9 @@ #define portCORTEX_M7_r0p1_ID ( 0x410FC271UL ) #define portCORTEX_M7_r0p0_ID ( 0x410FC270UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -239,10 +240,6 @@ static void prvTaskExitError( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. - * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - /* This port can be used on all revisions of the Cortex-M7 core other than * the r0p1 parts. r0p1 parts should use the port from the * /source/portable/GCC/ARM_CM7/r0p1 directory. */ @@ -273,6 +270,14 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. 
A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c index 2f4d1b9e349..dd6bde281c2 100644 --- a/portable/IAR/ARM_CM4F_MPU/port.c +++ b/portable/IAR/ARM_CM4F_MPU/port.c @@ -104,8 +104,9 @@ #define portCORTEX_M7_r0p1_ID ( 0x410FC271UL ) #define portCORTEX_M7_r0p0_ID ( 0x410FC270UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) #define portNVIC_SVC_PRI ( ( ( uint32_t ) configMAX_SYSCALL_INTERRUPT_PRIORITY - 1UL ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ @@ -346,10 +347,6 @@ void vPortSVCHandler_C( uint32_t * pulParam ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. - * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - /* Errata 837070 workaround must only be enabled on Cortex-M7 r0p0 * and r0p1 cores. */ #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) @@ -386,6 +383,14 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/IAR/ARM_CM7/r0p1/port.c b/portable/IAR/ARM_CM7/r0p1/port.c index 207f0b3eb67..e9e3805f054 100644 --- a/portable/IAR/ARM_CM7/r0p1/port.c +++ b/portable/IAR/ARM_CM7/r0p1/port.c @@ -59,8 +59,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -233,10 +234,6 @@ static void prvTaskExitError( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. 
- * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; @@ -261,6 +258,14 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/MikroC/ARM_CM4F/port.c b/portable/MikroC/ARM_CM4F/port.c index a0b83ebd30b..886913fbc40 100644 --- a/portable/MikroC/ARM_CM4F/port.c +++ b/portable/MikroC/ARM_CM4F/port.c @@ -48,8 +48,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -295,10 +296,6 @@ static void prvPortStartFirstTask( void ) */ BaseType_t xPortStartScheduler( void ) { - /* configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. - * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( configMAX_SYSCALL_INTERRUPT_PRIORITY ); - #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; @@ -320,13 +317,17 @@ BaseType_t xPortStartScheduler( void ) /* Read the value back to see how many bits stuck. */ ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* The kernel interrupt priority should be set to the lowest - * priority. */ - configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); - /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. 
*/ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/RVDS/ARM_CM3/port.c b/portable/RVDS/ARM_CM3/port.c index 551fb3441b5..ac4bef29572 100644 --- a/portable/RVDS/ARM_CM3/port.c +++ b/portable/RVDS/ARM_CM3/port.c @@ -34,10 +34,6 @@ #include "FreeRTOS.h" #include "task.h" -#ifndef configKERNEL_INTERRUPT_PRIORITY - #define configKERNEL_INTERRUPT_PRIORITY 255 -#endif - #if configMAX_SYSCALL_INTERRUPT_PRIORITY == 0 #error configMAX_SYSCALL_INTERRUPT_PRIORITY must not be set to 0. See http: /*www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ #endif @@ -65,8 +61,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -285,13 +282,17 @@ BaseType_t xPortStartScheduler( void ) /* Read the value back to see how many bits stuck. */ ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* The kernel interrupt priority should be set to the lowest - * priority. */ - configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); - /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/RVDS/ARM_CM4F/port.c b/portable/RVDS/ARM_CM4F/port.c index d6fb9d7a19c..cdc063dc9f8 100644 --- a/portable/RVDS/ARM_CM4F/port.c +++ b/portable/RVDS/ARM_CM4F/port.c @@ -71,8 +71,9 @@ #define portCORTEX_M7_r0p1_ID ( 0x410FC271UL ) #define portCORTEX_M7_r0p0_ID ( 0x410FC270UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -347,13 +348,17 @@ BaseType_t xPortStartScheduler( void ) /* Read the value back to see how many bits stuck. */ ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* The kernel interrupt priority should be set to the lowest - * priority. */ - configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); - /* Use the same mask on the maximum system call priority. 
*/ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c index 22680d77623..b9c7f90b446 100644 --- a/portable/RVDS/ARM_CM4_MPU/port.c +++ b/portable/RVDS/ARM_CM4_MPU/port.c @@ -83,8 +83,9 @@ #define portNVIC_SYSTICK_CLK ( 0x00000004UL ) #define portNVIC_SYSTICK_INT ( 0x00000002UL ) #define portNVIC_SYSTICK_ENABLE ( 0x00000001UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) #define portNVIC_SVC_PRI ( ( ( uint32_t ) configMAX_SYSCALL_INTERRUPT_PRIORITY - 1UL ) << 24UL ) /* Constants required to manipulate the VFP. */ @@ -442,6 +443,14 @@ BaseType_t xPortStartScheduler( void ) /* Use the same mask on the maximum system call priority. */ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/RVDS/ARM_CM7/r0p1/port.c b/portable/RVDS/ARM_CM7/r0p1/port.c index ae0a4a6d943..7dadb9adb34 100644 --- a/portable/RVDS/ARM_CM7/r0p1/port.c +++ b/portable/RVDS/ARM_CM7/r0p1/port.c @@ -65,8 +65,9 @@ #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL ) #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL ) -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16UL ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24UL ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Constants required to check the validity of an interrupt priority. */ #define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) @@ -331,13 +332,17 @@ BaseType_t xPortStartScheduler( void ) /* Read the value back to see how many bits stuck. */ ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* The kernel interrupt priority should be set to the lowest - * priority. */ - configASSERT( ucMaxPriorityValue == ( configKERNEL_INTERRUPT_PRIORITY & ucMaxPriorityValue ) ); - /* Use the same mask on the maximum system call priority. 
*/ ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; diff --git a/portable/Tasking/ARM_CM4F/port.c b/portable/Tasking/ARM_CM4F/port.c index d33a3982630..66b0d629466 100644 --- a/portable/Tasking/ARM_CM4F/port.c +++ b/portable/Tasking/ARM_CM4F/port.c @@ -41,8 +41,9 @@ #define portNVIC_SYSTICK_CLK 0x00000004 #define portNVIC_SYSTICK_INT 0x00000002 #define portNVIC_SYSTICK_ENABLE 0x00000001 -#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 16 ) -#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) configKERNEL_INTERRUPT_PRIORITY ) << 24 ) +#define portMIN_INTERRUPT_PRIORITY ( 255UL ) +#define portNVIC_PENDSV_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 16UL ) +#define portNVIC_SYSTICK_PRI ( ( ( uint32_t ) portMIN_INTERRUPT_PRIORITY ) << 24UL ) /* Masks off all bits but the VECTACTIVE bits in the ICSR register. */ #define portVECTACTIVE_MASK ( 0xFFUL ) @@ -70,7 +71,7 @@ /* The priority used by the kernel is assigned to a variable to make access * from inline assembler easier. */ -const uint32_t ulKernelPriority = configKERNEL_INTERRUPT_PRIORITY; +const uint32_t ulKernelPriority = portMIN_INTERRUPT_PRIORITY; /* Each task maintains its own interrupt status in the critical nesting * variable. */ From 97acc2e54a5aa7e1e47250f76b30627880255fb0 Mon Sep 17 00:00:00 2001 From: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com> Date: Thu, 2 Mar 2023 23:36:57 +0530 Subject: [PATCH 19/62] Introduced code coverage status badge (#635) * Introduced code coverage status badge * Trying to fix the URL checker issue * Fix URL check Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --- .github/actions/url_verifier.sh | 11 +++++++---- README.md | 2 +- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/actions/url_verifier.sh b/.github/actions/url_verifier.sh index 4c8aed53b12..e9804657b20 100755 --- a/.github/actions/url_verifier.sh +++ b/.github/actions/url_verifier.sh @@ -28,13 +28,14 @@ function test { for UNIQ_URL in ${!dict[@]} # loop urls do - CURL_RES=$(curl -si --user-agent "$(USER_AGENT)" ${UNIQ_URL} 2>/dev/null| head -n 1 | cut -f 2 -d ' ') + CURL_RES=$(curl -si --user-agent "${USER_AGENT}" ${UNIQ_URL} 2>/dev/null| head -n 1 | cut -f 2 -d ' ') RES=$? 
+ echo "=================================" + echo "Checking URL: ${UNIQ_URL}" + if [ "${CURL_RES}" == '' -o "${CURL_RES}" != '200' ] then - echo "URL is: ${UNIQ_URL}" - echo "File names: ${dict[$UNIQ_URL]}" if [ "${CURL_RES}" == '' ] # curl returned an error then CURL_RES=$RES @@ -47,8 +48,10 @@ function test { else echo WARNING: Result is: "${CURL_RES}" fi - echo "=================================" + else + echo SUCCESS: Result is: "${CURL_RES}" fi + echo "=================================" done if [ "${SCRIPT_RET}" -eq 0 ] diff --git a/README.md b/README.md index 90c3c698d15..952914daf46 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ [![CMock Unit Tests](https://github.com/FreeRTOS/FreeRTOS-Kernel/actions/workflows/unit-tests.yml/badge.svg?branch=main&event=push)](https://github.com/FreeRTOS/FreeRTOS-Kernel/actions/workflows/unit-tests.yml?query=branch%3Amain+event%3Apush+workflow%3A%22CMock+Unit+Tests%22++) - +[![codecov](https://codecov.io/gh/FreeRTOS/FreeRTOS-Kernel/badge.svg?branch=main)](https://codecov.io/gh/FreeRTOS/FreeRTOS-Kernel) ## Getting started This repository contains FreeRTOS kernel source/header files and kernel ports only. This repository is referenced as a submodule in [FreeRTOS/FreeRTOS](https://github.com/FreeRTOS/FreeRTOS) repository, which contains pre-configured demo application projects under ```FreeRTOS/Demo``` directory. From ddd50d9a80fd80e2bca7e93d49baf5c71986cbbb Mon Sep 17 00:00:00 2001 From: Joseph Julicher Date: Fri, 3 Mar 2023 19:01:16 -0700 Subject: [PATCH 20/62] added portPOINTER_SIZE_TYPE and SIZE_MAX definition to PIC24/dsPIC port (#636) * added portPOINTER_SIZE_TYPE definition to PIC24/dsPIC port * Added SIZE_MAX definition to PIC24/dsPIC33 --- portable/MPLAB/PIC24_dsPIC/portmacro.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/portable/MPLAB/PIC24_dsPIC/portmacro.h b/portable/MPLAB/PIC24_dsPIC/portmacro.h index d05317e6f8c..83b695a43bb 100644 --- a/portable/MPLAB/PIC24_dsPIC/portmacro.h +++ b/portable/MPLAB/PIC24_dsPIC/portmacro.h @@ -51,6 +51,8 @@ extern "C" { #define portSHORT short #define portSTACK_TYPE uint16_t #define portBASE_TYPE short +#define portPOINTER_SIZE_TYPE size_t +#define SIZE_MAX ( ( size_t ) -1 ) typedef portSTACK_TYPE StackType_t; typedef short BaseType_t; From 563c57e7dab713aa1e289795c44549239f732a43 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Sun, 5 Mar 2023 22:29:39 -0800 Subject: [PATCH 21/62] Fix TLS and stack alignment when using picolibc (#637) Both the TLS block and stack must be correctly aligned when using picolibc. The architecture stack alignment is represented by the portBYTE_ALIGNMENT_MASK and the TLS block alignment is provided by the Picolibc _tls_align() inline function for Picolibc version 1.8 and above. For older versions of Picolibc, we'll assume that the TLS block requires the same alignment as the stack. For downward growing stacks, this requires aligning the start of the TLS block to the maximum of the stack alignment and the TLS alignment. With this, both the TLS block and stack will now be correctly aligned. For upward growing stacks, the two areas must be aligned independently; the TLS block is aligned from the start of the stack, then the tls space is allocated, and then the stack is aligned above that. It's probably useful to know here that the linker ensures that variables within the TLS block are assigned offsets that match their alignment requirements. If the TLS block itself is correctly aligned, then everything within will also be. 
I have only tested the downward growing stack branch of this patch. Signed-off-by: Keith Packard Co-authored-by: Keith Packard Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --- include/picolibc-freertos.h | 41 ++++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/include/picolibc-freertos.h b/include/picolibc-freertos.h index 472d71e9932..467f7a97091 100644 --- a/include/picolibc-freertos.h +++ b/include/picolibc-freertos.h @@ -43,22 +43,43 @@ #define configTLS_BLOCK_TYPE void * +#define picolibcTLS_SIZE ( ( portPOINTER_SIZE_TYPE ) _tls_size() ) +#define picolibcSTACK_ALIGNMENT_MASK ( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) + +#if __PICOLIBC_MAJOR__ > 1 || __PICOLIBC_MINOR__ >= 8 + +/* Picolibc 1.8 and newer have explicit alignment values provided + * by the _tls_align() inline */ + #define picolibcTLS_ALIGNMENT_MASK ( ( portPOINTER_SIZE_TYPE ) ( _tls_align() - 1 ) ) +#else + +/* For older Picolibc versions, use the general port alignment value */ + #define picolibcTLS_ALIGNMENT_MASK ( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) +#endif + /* Allocate thread local storage block off the end of the * stack. The _tls_size() function returns the size (in * bytes) of the total TLS area used by the application */ #if ( portSTACK_GROWTH < 0 ) - #define configINIT_TLS_BLOCK( xTLSBlock, pxTopOfStack ) \ - do { \ - pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) - _tls_size() ); \ - xTLSBlock = pxTopOfStack; \ - _init_tls( xTLSBlock ); \ + + #define configINIT_TLS_BLOCK( xTLSBlock, pxTopOfStack ) \ + do { \ + pxTopOfStack = ( StackType_t * ) ( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) \ + - picolibcTLS_SIZE ) & ~ \ + configMAX( picolibcSTACK_ALIGNMENT_MASK, \ + picolibcTLS_ALIGNMENT_MASK ) ); \ + xTLSBlock = pxTopOfStack; \ + _init_tls( xTLSBlock ); \ } while( 0 ) #else /* portSTACK_GROWTH */ - #define configINIT_TLS_BLOCK( xTLSBlock, pxTopOfStack ) \ - do { \ - xTLSBlock = pxTopOfStack; \ - pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) + _tls_size() ); \ - _init_tls( xTLSBlock ); \ + #define configINIT_TLS_BLOCK( xTLSBlock, pxTopOfStack ) \ + do { \ + xTLSBlock = ( void * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack + \ + picolibcTLS_ALIGNMENT_MASK ) & ~picolibcTLS_ALIGNMENT_MASK ); \ + pxTopOfStack = ( StackType_t * ) ( ( ( ( ( portPOINTER_SIZE_TYPE ) xTLSBlock ) + \ + picolibcTLS_SIZE ) + picolibcSTACK_ALIGNMENT_MASK ) & \ + ~picolibcSTACK_ALIGNMENT_MASK ); \ + _init_tls( xTLSBlock ); \ } while( 0 ) #endif /* portSTACK_GROWTH */ From 7b26ea6263d795bd1090eac60d63a29080934f2f Mon Sep 17 00:00:00 2001 From: Paul Bartell Date: Mon, 6 Mar 2023 08:19:28 -0800 Subject: [PATCH 22/62] Enable building the GCC Cortex-R5 port without an FPU (#586) * Ensure configUSE_TASK_FPU_SUPPORT option is set correctly If one does enable the FPU of the Cortex-R5 processor, then the GCC compiler will define the macro __ARM_FP. This can be used to ensure, that the configUSE_TASK_FPU_SUPPORT is set accordingly. 
* Enable the implementation of vPortTaskUsesFPU only if configUSE_TASK_FPU_SUPPORT is set to 1 * Remove error case in pxPortInitialiseStack The case of configUSE_TASK_FPU_SUPPORT is 0 is now handled * Enable access to FPU registers only if FPU is enabled * Make minor formating changes * Format ARM Cortex-R5 port * Address review comments from @ChristosZosi * Minor code review suggestions Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal Co-authored-by: Christos Zosimidis Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal --- portable/GCC/ARM_CR5/port.c | 311 ++++++++++++++++++++----------- portable/GCC/ARM_CR5/portASM.S | 86 +++++---- portable/GCC/ARM_CR5/portmacro.h | 250 +++++++++++++------------ 3 files changed, 385 insertions(+), 262 deletions(-) diff --git a/portable/GCC/ARM_CR5/port.c b/portable/GCC/ARM_CR5/port.c index 8a9839cde56..03007821967 100644 --- a/portable/GCC/ARM_CR5/port.c +++ b/portable/GCC/ARM_CR5/port.c @@ -74,25 +74,52 @@ #error configMAX_API_CALL_INTERRUPT_PRIORITY must be greater than ( configUNIQUE_INTERRUPT_PRIORITIES / 2 ) #endif -/* Some vendor specific files default configCLEAR_TICK_INTERRUPT() in - * portmacro.h. */ +/* + * __ARM_FP is defined by the c preprocessor when FPU support is enabled, + * usually with the -mfpu= argument and -mfloat-abi=. + * + * Note: Some implementations of the c standard library may use FPU registers + * for generic memory operations (memcpy, etc). + * When setting configUSE_TASK_FPU_SUPPORT == 1, care must be taken to + * ensure that the FPU registers are not used without an FPU context. + */ +#if ( configUSE_TASK_FPU_SUPPORT == 0 ) + #ifdef __ARM_FP + #error __ARM_FP is defined, so configUSE_TASK_FPU_SUPPORT must be set to either to 1 or 2. + #endif /* __ARM_FP */ +#elif ( configUSE_TASK_FPU_SUPPORT == 1 ) || ( configUSE_TASK_FPU_SUPPORT == 2 ) + #ifndef __ARM_FP + #error __ARM_FP is not defined, so configUSE_TASK_FPU_SUPPORT must be set to 0. + #endif /* __ARM_FP */ +#endif /* configUSE_TASK_FPU_SUPPORT */ + +/* + * Some vendor specific files default configCLEAR_TICK_INTERRUPT() in + * portmacro.h. + */ #ifndef configCLEAR_TICK_INTERRUPT #define configCLEAR_TICK_INTERRUPT() #endif -/* A critical section is exited when the critical section nesting count reaches - * this value. */ +/* + * A critical section is exited when the critical section nesting count reaches + * this value. + */ #define portNO_CRITICAL_NESTING ( ( uint32_t ) 0 ) -/* In all GICs 255 can be written to the priority mask register to unmask all - * (but the lowest) interrupt priority. */ +/* + * In all GICs 255 can be written to the priority mask register to unmask all + * (but the lowest) interrupt priority. + */ #define portUNMASK_VALUE ( 0xFFUL ) -/* Tasks are not created with a floating point context, but can be given a +/* + * Tasks are not created with a floating point context, but can be given a * floating point context after they have been created. A variable is stored as * part of the tasks context that holds portNO_FLOATING_POINT_CONTEXT if the task * does not have an FPU context, or any other value if the task does have an FPU - * context. */ + * context. + */ #define portNO_FLOATING_POINT_CONTEXT ( ( StackType_t ) 0 ) /* Constants required to setup the initial task context. 
*/ @@ -101,8 +128,10 @@ #define portINTERRUPT_ENABLE_BIT ( 0x80UL ) #define portTHUMB_MODE_ADDRESS ( 0x01UL ) -/* Used by portASSERT_IF_INTERRUPT_PRIORITY_INVALID() when ensuring the binary - * point is zero. */ +/* + * Used by portASSERT_IF_INTERRUPT_PRIORITY_INVALID() when ensuring the binary + * point is zero. + */ #define portBINARY_POINT_BITS ( ( uint8_t ) 0x03 ) /* Masks all bits in the APSR other than the mode bits. */ @@ -140,15 +169,19 @@ #define portMAX_8_BIT_VALUE ( ( uint8_t ) 0xff ) #define portBIT_0_SET ( ( uint8_t ) 0x01 ) -/* Let the user override the pre-loading of the initial LR with the address of +/* + * Let the user override the pre-loading of the initial LR with the address of * prvTaskExitError() in case is messes up unwinding of the stack in the - * debugger. */ + * debugger. + */ #ifdef configTASK_RETURN_ADDRESS #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS #else #define portTASK_RETURN_ADDRESS prvTaskExitError #endif +#if ( configUSE_TASK_FPU_SUPPORT != 0 ) + /* * The space on the stack required to hold the FPU registers. * @@ -160,7 +193,8 @@ * the size of the bank remains the same. The FPU has also a 32-bit * status register. */ -#define portFPU_REGISTER_WORDS ( ( 16 * 2 ) + 1 ) + #define portFPU_REGISTER_WORDS ( ( 16 * 2 ) + 1 ) +#endif /* configUSE_TASK_FPU_SUPPORT != 0 */ /*-----------------------------------------------------------*/ @@ -175,6 +209,8 @@ extern void vPortRestoreTaskContext( void ); */ static void prvTaskExitError( void ); +#if ( configUSE_TASK_FPU_SUPPORT != 0 ) + /* * If the application provides an implementation of vApplicationIRQHandler(), * then it will get called directly without saving the FPU registers on @@ -194,26 +230,36 @@ static void prvTaskExitError( void ); * FPU registers to be saved on interrupt entry their IRQ handler must be * called vApplicationIRQHandler(). */ -void vApplicationFPUSafeIRQHandler( uint32_t ulICCIAR ) __attribute__((weak) ); + void vApplicationFPUSafeIRQHandler( uint32_t ulICCIAR ) __attribute__( ( weak ) ); +#endif /* configUSE_TASK_FPU_SUPPORT != 0 */ /*-----------------------------------------------------------*/ -/* A variable is used to keep track of the critical section nesting. This +/* + * A variable is used to keep track of the critical section nesting. This * variable has to be stored as part of the task context and must be initialised to * a non zero value to ensure interrupts don't inadvertently become unmasked before * the scheduler starts. As it is stored as part of the task context it will - * automatically be set to 0 when the first task is started. */ + * automatically be set to 0 when the first task is started. + */ volatile uint32_t ulCriticalNesting = 9999UL; -/* Saved as part of the task context. If ulPortTaskHasFPUContext is non-zero then - * a floating point context must be saved and restored for the task. */ -uint32_t ulPortTaskHasFPUContext = pdFALSE; +#if ( configUSE_TASK_FPU_SUPPORT != 0 ) + +/* + * Saved as part of the task context. If ulPortTaskHasFPUContext is non-zero then + * a floating point context must be saved and restored for the task. + */ + uint32_t ulPortTaskHasFPUContext = pdFALSE; +#endif /* configUSE_TASK_FPU_SUPPORT != 0 */ /* Set to 1 to pend a context switch from an ISR. */ uint32_t ulPortYieldRequired = pdFALSE; -/* Counts the interrupt nesting depth. A context switch is only performed if - * if the nesting depth is 0. */ +/* + * Counts the interrupt nesting depth. A context switch is only performed if + * if the nesting depth is 0. 
+ */ uint32_t ulPortInterruptNesting = 0UL; /* Used in asm code. */ @@ -231,12 +277,14 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, TaskFunction_t pxCode, void * pvParameters ) { - /* Setup the initial stack of the task. The stack is set exactly as + /* + * Setup the initial stack of the task. The stack is set exactly as * expected by the portRESTORE_CONTEXT() macro. * * The fist real value on the stack is the status register, which is set for * system mode, with interrupts enabled. A few NULLs are added first to ensure - * GDB does not try decoding a non-existent return address. */ + * GDB does not try decoding a non-existent return address. + */ *pxTopOfStack = ( StackType_t ) NULL; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) NULL; @@ -285,24 +333,31 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, *pxTopOfStack = ( StackType_t ) 0x01010101; /* R1 */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack--; - /* The task will start with a critical nesting count of 0 as interrupts are - * enabled. */ + /* + * The task will start with a critical nesting count of 0 as interrupts are + * enabled. + */ + pxTopOfStack--; *pxTopOfStack = portNO_CRITICAL_NESTING; - #if( configUSE_TASK_FPU_SUPPORT == 1 ) + #if ( configUSE_TASK_FPU_SUPPORT == 1 ) { - /* The task will start without a floating point context. A task that - uses the floating point hardware must call vPortTaskUsesFPU() before - executing any floating point instructions. */ + /* + * The task will start without a floating point context. + * A task that uses the floating point hardware must call + * vPortTaskUsesFPU() before executing any floating point + * instructions. + */ pxTopOfStack--; *pxTopOfStack = portNO_FLOATING_POINT_CONTEXT; } - #elif( configUSE_TASK_FPU_SUPPORT == 2 ) + #elif ( configUSE_TASK_FPU_SUPPORT == 2 ) { - /* The task will start with a floating point context. Leave enough - space for the registers - and ensure they are initialized to 0. */ + /* + * The task will start with a floating point context. Leave enough + * space for the registers and ensure they are initialized to 0. + */ pxTopOfStack -= portFPU_REGISTER_WORDS; memset( pxTopOfStack, 0x00, portFPU_REGISTER_WORDS * sizeof( StackType_t ) ); @@ -310,9 +365,9 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, *pxTopOfStack = pdTRUE; ulPortTaskHasFPUContext = pdTRUE; } - #else + #elif ( configUSE_TASK_FPU_SUPPORT != 0 ) { - #error Invalid configUSE_TASK_FPU_SUPPORT setting - configUSE_TASK_FPU_SUPPORT must be set to 1, 2, or left undefined. + #error Invalid configUSE_TASK_FPU_SUPPORT setting - configUSE_TASK_FPU_SUPPORT must be set to 0, 1, or 2. } #endif /* configUSE_TASK_FPU_SUPPORT */ @@ -322,12 +377,14 @@ StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, static void prvTaskExitError( void ) { - /* A function that implements a task must not exit or attempt to return to + /* + * A function that implements a task must not exit or attempt to return to * its caller as there is nothing to return to. If a task wants to exit it * should instead call vTaskDelete( NULL ). * * Artificially force an assert() to be triggered if configASSERT() is - * defined, then stop here so application writers can catch the error. */ + * defined, then stop here so application writers can catch the error. 
+ */ configASSERT( ulPortInterruptNesting == ~0UL ); portDISABLE_INTERRUPTS(); @@ -337,11 +394,15 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ -void vApplicationFPUSafeIRQHandler( uint32_t ulICCIAR ) -{ - ( void ) ulICCIAR; - configASSERT( ( volatile void * ) NULL ); -} +#if ( configUSE_TASK_FPU_SUPPORT != 0 ) + + void vApplicationFPUSafeIRQHandler( uint32_t ulICCIAR ) /* __attribute__( ( weak ) ) */ + { + ( void ) ulICCIAR; + configASSERT( ( volatile void * ) NULL ); + } + +#endif /* configUSE_TASK_FPU_SUPPORT != 0 */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) @@ -349,67 +410,82 @@ BaseType_t xPortStartScheduler( void ) uint32_t ulAPSR, ulCycles = 8; /* 8 bits per byte. */ #if ( configASSERT_DEFINED == 1 ) + { + volatile uint32_t ulOriginalPriority; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + portINTERRUPT_PRIORITY_REGISTER_OFFSET ); + volatile uint8_t ucMaxPriorityValue; + + /* + * Determine how many priority bits are implemented in the GIC. + * Save the interrupt priority value that is about to be clobbered. + */ + ulOriginalPriority = *pucFirstUserPriorityRegister; + + /* + * Determine the number of priority bits available. First write to + * all possible bits. + */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; + + /* Shift to the least significant bits. */ + while( ( ucMaxPriorityValue & portBIT_0_SET ) != portBIT_0_SET ) { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + portINTERRUPT_PRIORITY_REGISTER_OFFSET ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine how many priority bits are implemented in the GIC. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; - - /* Determine the number of priority bits available. First write to - * all possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + ucMaxPriorityValue >>= ( uint8_t ) 0x01; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* + * If ulCycles reaches 0 then ucMaxPriorityValue must have been + * read as 0, indicating a misconfiguration. + */ + ulCycles--; - /* Shift to the least significant bits. */ - while( ( ucMaxPriorityValue & portBIT_0_SET ) != portBIT_0_SET ) + if( ulCycles == 0 ) { - ucMaxPriorityValue >>= ( uint8_t ) 0x01; - - /* If ulCycles reaches 0 then ucMaxPriorityValue must have been - * read as 0, indicating a misconfiguration. */ - ulCycles--; - - if( ulCycles == 0 ) - { - break; - } + break; } - - /* Sanity check configUNIQUE_INTERRUPT_PRIORITIES matches the read - * value. */ - configASSERT( ucMaxPriorityValue == portLOWEST_INTERRUPT_PRIORITY ); - - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; } + + /* + * Sanity check configUNIQUE_INTERRUPT_PRIORITIES matches the read + * value. + */ + configASSERT( ucMaxPriorityValue == portLOWEST_INTERRUPT_PRIORITY ); + + /* + * Restore the clobbered interrupt priority register to its original + * value. 
+ */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ - /* Only continue if the CPU is not in User mode. The CPU must be in a - * Privileged mode for the scheduler to start. */ + /* + * Only continue if the CPU is not in User mode. The CPU must be in a + * Privileged mode for the scheduler to start. + */ __asm volatile ( "MRS %0, APSR" : "=r" ( ulAPSR )::"memory" ); ulAPSR &= portAPSR_MODE_BITS_MASK; configASSERT( ulAPSR != portAPSR_USER_MODE ); if( ulAPSR != portAPSR_USER_MODE ) { - /* Only continue if the binary point value is set to its lowest possible + /* + * Only continue if the binary point value is set to its lowest possible * setting. See the comments in vPortValidateInterruptPriority() below for - * more information. */ + * more information. + */ configASSERT( ( portICCBPR_BINARY_POINT_REGISTER & portBINARY_POINT_BITS ) <= portMAX_BINARY_POINT_VALUE ); if( ( portICCBPR_BINARY_POINT_REGISTER & portBINARY_POINT_BITS ) <= portMAX_BINARY_POINT_VALUE ) { - /* Interrupts are turned off in the CPU itself to ensure tick does + /* + * Interrupts are turned off in the CPU itself to ensure tick does * not execute while the scheduler is being started. Interrupts are * automatically turned back on in the CPU when the first task starts - * executing. */ + * executing. + */ portCPU_IRQ_DISABLE(); /* Start the timer that generates the tick ISR. */ @@ -420,20 +496,25 @@ BaseType_t xPortStartScheduler( void ) } } - /* Will only get here if vTaskStartScheduler() was called with the CPU in + /* + * Will only get here if vTaskStartScheduler() was called with the CPU in * a non-privileged mode or the binary point register was not set to its lowest * possible value. prvTaskExitError() is referenced to prevent a compiler * warning about it being defined but not referenced in the case that the user - * defines their own exit address. */ + * defines their own exit address. + */ ( void ) prvTaskExitError; + return 0; } /*-----------------------------------------------------------*/ void vPortEndScheduler( void ) { - /* Not implemented in ports where there is nothing to return to. - * Artificially force an assert. */ + /* + * Not implemented in ports where there is nothing to return to. + * Artificially force an assert. + */ configASSERT( ulCriticalNesting == 1000UL ); } /*-----------------------------------------------------------*/ @@ -443,16 +524,20 @@ void vPortEnterCritical( void ) /* Mask interrupts up to the max syscall interrupt priority. */ ulPortSetInterruptMask(); - /* Now interrupts are disabled ulCriticalNesting can be accessed + /* + * Now interrupts are disabled ulCriticalNesting can be accessed * directly. Increment ulCriticalNesting to keep a count of how many times - * portENTER_CRITICAL() has been called. */ + * portENTER_CRITICAL() has been called. + */ ulCriticalNesting++; - /* This is not the interrupt safe version of the enter critical function so + /* + * This is not the interrupt safe version of the enter critical function so * assert() if it is being called from an interrupt context. Only API * functions that end in "FromISR" can be used in an interrupt. Only assert if * the critical nesting count is 1 to protect against recursive calls if the - * assert function also uses a critical section. */ + * assert function also uses a critical section. 
+ */ if( ulCriticalNesting == 1 ) { configASSERT( ulPortInterruptNesting == 0 ); @@ -464,16 +549,19 @@ void vPortExitCritical( void ) { if( ulCriticalNesting > portNO_CRITICAL_NESTING ) { - /* Decrement the nesting count as the critical section is being - * exited. */ + /* Decrement the nesting count as the critical section is being exited. */ ulCriticalNesting--; - /* If the nesting level has reached zero then all interrupt - * priorities must be re-enabled. */ + /* + * If the nesting level has reached zero then all interrupt + * priorities must be re-enabled. + */ if( ulCriticalNesting == portNO_CRITICAL_NESTING ) { - /* Critical nesting has reached zero so all interrupt priorities - * should be unmasked. */ + /* + * Critical nesting has reached zero so all interrupt priorities + * should be unmasked. + */ portCLEAR_INTERRUPT_MASK(); } } @@ -482,11 +570,13 @@ void vPortExitCritical( void ) void FreeRTOS_Tick_Handler( void ) { - /* Set interrupt mask before altering scheduler structures. The tick + /* + * Set interrupt mask before altering scheduler structures. The tick * handler runs at the lowest priority, so interrupts cannot already be masked, * so there is no need to save and restore the current mask value. It is * necessary to turn off interrupts in the CPU itself while the ICCPMR is being - * updated. */ + * updated. + */ portCPU_IRQ_DISABLE(); portICCPMR_PRIORITY_MASK_REGISTER = ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ); __asm volatile ( "dsb \n" @@ -505,21 +595,23 @@ void FreeRTOS_Tick_Handler( void ) } /*-----------------------------------------------------------*/ -#if( configUSE_TASK_FPU_SUPPORT != 2 ) +#if ( configUSE_TASK_FPU_SUPPORT == 1 ) void vPortTaskUsesFPU( void ) { uint32_t ulInitialFPSCR = 0; - /* A task is registering the fact that it needs an FPU context. Set the - * FPU flag (which is saved as part of the task context). */ + /* + * A task is registering the fact that it needs an FPU context. Set the + * FPU flag (which is saved as part of the task context). + */ ulPortTaskHasFPUContext = pdTRUE; /* Initialise the floating point status register. */ __asm volatile ( "FMXR FPSCR, %0" ::"r" ( ulInitialFPSCR ) : "memory" ); } -#endif /* configUSE_TASK_FPU_SUPPORT */ +#endif /* configUSE_TASK_FPU_SUPPORT == 1 */ /*-----------------------------------------------------------*/ void vPortClearInterruptMask( uint32_t ulNewMaskValue ) @@ -535,8 +627,7 @@ uint32_t ulPortSetInterruptMask( void ) { uint32_t ulReturn; - /* Interrupt in the CPU must be turned off while the ICCPMR is being - * updated. */ + /* Interrupts must be masked while ICCPMR is updated. */ portCPU_IRQ_DISABLE(); if( portICCPMR_PRIORITY_MASK_REGISTER == ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) ) @@ -562,7 +653,8 @@ uint32_t ulPortSetInterruptMask( void ) void vPortValidateInterruptPriority( void ) { - /* The following assertion will fail if a service routine (ISR) for + /* + * The following assertion will fail if a service routine (ISR) for * an interrupt that has been assigned a priority above * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API * function. ISR safe FreeRTOS API functions must *only* be called @@ -575,11 +667,13 @@ uint32_t ulPortSetInterruptMask( void ) * configMAX_SYSCALL_INTERRUPT_PRIORITY. * * FreeRTOS maintains separate thread and ISR API functions to ensure - * interrupt entry is as fast and simple as possible. */ + * interrupt entry is as fast and simple as possible. 
+ */ configASSERT( portICCRPR_RUNNING_PRIORITY_REGISTER >= ( uint32_t ) ( configMAX_API_CALL_INTERRUPT_PRIORITY << portPRIORITY_SHIFT ) ); - /* Priority grouping: The interrupt controller (GIC) allows the bits + /* + * Priority grouping: The interrupt controller (GIC) allows the bits * that define each interrupt's priority to be split between bits that * define the interrupt's pre-emption priority bits and bits that define * the interrupt's sub-priority. For simplicity all bits must be defined @@ -588,7 +682,8 @@ uint32_t ulPortSetInterruptMask( void ) * * The priority grouping is configured by the GIC's binary point register * (ICCBPR). Writing 0 to ICCBPR will ensure it is set to its lowest - * possible value (which may be above 0). */ + * possible value (which may be above 0). + */ configASSERT( ( portICCBPR_BINARY_POINT_REGISTER & portBINARY_POINT_BITS ) <= portMAX_BINARY_POINT_VALUE ); } diff --git a/portable/GCC/ARM_CR5/portASM.S b/portable/GCC/ARM_CR5/portASM.S index c44ea6b7913..c331057d610 100644 --- a/portable/GCC/ARM_CR5/portASM.S +++ b/portable/GCC/ARM_CR5/portASM.S @@ -45,7 +45,10 @@ .extern vTaskSwitchContext .extern vApplicationIRQHandler .extern ulPortInterruptNesting + +#if defined( __ARM_FP ) .extern ulPortTaskHasFPUContext +#endif /* __ARM_FP */ .global FreeRTOS_IRQ_Handler .global FreeRTOS_SWI_Handler @@ -64,20 +67,21 @@ LDR R1, [R2] PUSH {R1} - /* Does the task have a floating point context that needs saving? If - ulPortTaskHasFPUContext is 0 then no. */ - LDR R2, ulPortTaskHasFPUContextConst - LDR R3, [R2] - CMP R3, #0 + #if defined( __ARM_FP ) + /* Does the task have a floating point context that needs saving? If + ulPortTaskHasFPUContext is 0 then no. */ + LDR R2, ulPortTaskHasFPUContextConst + LDR R3, [R2] + CMP R3, #0 - /* Save the floating point context, if any. */ - FMRXNE R1, FPSCR - VPUSHNE {D0-D15} - /*VPUSHNE {D16-D31}*/ - PUSHNE {R1} + /* Save the floating point context, if any. */ + FMRXNE R1, FPSCR + VPUSHNE {D0-D15} + PUSHNE {R1} - /* Save ulPortTaskHasFPUContext itself. */ - PUSH {R3} + /* Save ulPortTaskHasFPUContext itself. */ + PUSH {R3} + #endif /* __ARM_FP */ /* Save the stack pointer in the TCB. */ LDR R0, pxCurrentTCBConst @@ -95,18 +99,21 @@ LDR R1, [R0] LDR SP, [R1] - /* Is there a floating point context to restore? If the restored - ulPortTaskHasFPUContext is zero then no. */ - LDR R0, ulPortTaskHasFPUContextConst - POP {R1} - STR R1, [R0] - CMP R1, #0 - - /* Restore the floating point context, if any. */ - POPNE {R0} - /*VPOPNE {D16-D31}*/ - VPOPNE {D0-D15} - VMSRNE FPSCR, R0 + #if defined( __ARM_FP ) + /* + * Is there a floating point context to restore? If the restored + * ulPortTaskHasFPUContext is zero then no. + */ + LDR R0, ulPortTaskHasFPUContextConst + POP {R1} + STR R1, [R0] + CMP R1, #0 + + /* Restore the floating point context, if any. */ + POPNE {R0} + VPOPNE {D0-D15} + VMSRNE FPSCR, R0 + #endif /* __ARM_FP */ /* Restore the critical section nesting depth. */ LDR R0, ulCriticalNestingConst @@ -132,8 +139,6 @@ .endm - - /****************************************************************************** * SVC handler is used to start the scheduler. *****************************************************************************/ @@ -279,22 +284,25 @@ switch_before_exit: * FPU registers to be saved on interrupt entry their IRQ handler must be * called vApplicationIRQHandler(). 
*****************************************************************************/ - .align 4 .weak vApplicationIRQHandler .type vApplicationIRQHandler, %function vApplicationIRQHandler: + PUSH {LR} - FMRX R1, FPSCR - VPUSH {D0-D15} - PUSH {R1} - LDR r1, vApplicationFPUSafeIRQHandlerConst - BLX r1 + #if defined( __ARM_FP ) + FMRX R1, FPSCR + VPUSH {D0-D15} + PUSH {R1} - POP {R0} - VPOP {D0-D15} - VMSR FPSCR, R0 + LDR r1, vApplicationFPUSafeIRQHandlerConst + BLX r1 + + POP {R0} + VPOP {D0-D15} + VMSR FPSCR, R0 + #endif /* __ARM_FP */ POP {PC} @@ -303,11 +311,15 @@ ulICCEOIRConst: .word ulICCEOIR ulICCPMRConst: .word ulICCPMR pxCurrentTCBConst: .word pxCurrentTCB ulCriticalNestingConst: .word ulCriticalNesting -ulPortTaskHasFPUContextConst: .word ulPortTaskHasFPUContext + +#if defined( __ARM_FP ) + ulPortTaskHasFPUContextConst: .word ulPortTaskHasFPUContext + vApplicationFPUSafeIRQHandlerConst: .word vApplicationFPUSafeIRQHandler +#endif /* __ARM_FP */ + ulMaxAPIPriorityMaskConst: .word ulMaxAPIPriorityMask vTaskSwitchContextConst: .word vTaskSwitchContext vApplicationIRQHandlerConst: .word vApplicationIRQHandler ulPortInterruptNestingConst: .word ulPortInterruptNesting -vApplicationFPUSafeIRQHandlerConst: .word vApplicationFPUSafeIRQHandler .end diff --git a/portable/GCC/ARM_CR5/portmacro.h b/portable/GCC/ARM_CR5/portmacro.h index 4bd25bb048b..ff7337d1502 100644 --- a/portable/GCC/ARM_CR5/portmacro.h +++ b/portable/GCC/ARM_CR5/portmacro.h @@ -27,11 +27,13 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -44,161 +46,175 @@ */ /* Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +typedef uint32_t TickType_t; +#define portMAX_DELAY ( TickType_t ) 0xffffffffUL /*-----------------------------------------------------------*/ /* Hardware specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 /*-----------------------------------------------------------*/ /* Task utilities. */ /* Called at the end of an ISR that can cause a context switch. 
*/ - #define portEND_SWITCHING_ISR( xSwitchRequired ) \ - { \ - extern uint32_t ulPortYieldRequired; \ - \ - if( xSwitchRequired != pdFALSE ) \ - { \ - ulPortYieldRequired = pdTRUE; \ - } \ +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + { \ + extern uint32_t ulPortYieldRequired; \ + \ + if( xSwitchRequired != pdFALSE ) \ + { \ + ulPortYieldRequired = pdTRUE; \ + } \ } - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) - #define portYIELD() __asm volatile ( "SWI 0" ::: "memory" ); +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() __asm volatile ( "SWI 0" ::: "memory" ); /*----------------------------------------------------------- * Critical section control *----------------------------------------------------------*/ - extern void vPortEnterCritical( void ); - extern void vPortExitCritical( void ); - extern uint32_t ulPortSetInterruptMask( void ); - extern void vPortClearInterruptMask( uint32_t ulNewMaskValue ); - extern void vPortInstallFreeRTOSVectorTable( void ); - -/* These macros do not globally disable/enable interrupts. They do mask off - * interrupts that have a priority below configMAX_API_CALL_INTERRUPT_PRIORITY. */ - #define portENTER_CRITICAL() vPortEnterCritical(); - #define portEXIT_CRITICAL() vPortExitCritical(); - #define portDISABLE_INTERRUPTS() ulPortSetInterruptMask() - #define portENABLE_INTERRUPTS() vPortClearInterruptMask( 0 ) - #define portSET_INTERRUPT_MASK_FROM_ISR() ulPortSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vPortClearInterruptMask( x ) +extern void vPortEnterCritical( void ); +extern void vPortExitCritical( void ); +extern uint32_t ulPortSetInterruptMask( void ); +extern void vPortClearInterruptMask( uint32_t ulNewMaskValue ); +extern void vPortInstallFreeRTOSVectorTable( void ); + +/* + * These macros do not globally disable/enable interrupts. They do mask off + * interrupts that have a priority below configMAX_API_CALL_INTERRUPT_PRIORITY. + */ +#define portENTER_CRITICAL() vPortEnterCritical(); +#define portEXIT_CRITICAL() vPortExitCritical(); +#define portDISABLE_INTERRUPTS() ulPortSetInterruptMask() +#define portENABLE_INTERRUPTS() vPortClearInterruptMask( 0 ) +#define portSET_INTERRUPT_MASK_FROM_ISR() ulPortSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vPortClearInterruptMask( x ) /*-----------------------------------------------------------*/ -/* Task function macros as described on the FreeRTOS.org WEB site. These are +/* + * Task function macros as described on the FreeRTOS.org WEB site. These are * not required for this port but included in case common demo code that uses these - * macros is used. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) - -/* Prototype of the FreeRTOS tick handler. This must be installed as the - * handler for whichever peripheral is used to generate the RTOS tick. */ - void FreeRTOS_Tick_Handler( void ); - -/* If configUSE_TASK_FPU_SUPPORT is set to 1 (or left undefined) then tasks are -created without an FPU context and must call vPortTaskUsesFPU() to give -themselves an FPU context before using any FPU instructions. If -configUSE_TASK_FPU_SUPPORT is set to 2 then all tasks will have an FPU context -by default. */ -#if( configUSE_TASK_FPU_SUPPORT != 2 ) + * macros is used. 
+ */ +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) + +/* + * Prototype of the FreeRTOS tick handler. This must be installed as the + * handler for whichever peripheral is used to generate the RTOS tick. + */ +void FreeRTOS_Tick_Handler( void ); + +/* + * If configUSE_TASK_FPU_SUPPORT is set to 1, then tasks are created without an + * FPU context and must call vPortTaskUsesFPU() to allocate an FPU context + * prior to any FPU instructions. If configUSE_TASK_FPU_SUPPORT is set to 2, + * then all tasks have an FPU context allocated by default. + */ +#if ( configUSE_TASK_FPU_SUPPORT == 1 ) void vPortTaskUsesFPU( void ); -#else - /* Each task has an FPU context already, so define this function away to - nothing to prevent it being called accidentally. */ + #define portTASK_USES_FLOATING_POINT() vPortTaskUsesFPU() +#elif ( configUSE_TASK_FPU_SUPPORT == 2 ) + +/* + * Each task has an FPU context already, so define this function away to + * prevent it being called accidentally. + */ #define vPortTaskUsesFPU() + #define portTASK_USES_FLOATING_POINT() #endif /* configUSE_TASK_FPU_SUPPORT */ - #define portTASK_USES_FLOATING_POINT() vPortTaskUsesFPU() - #define portLOWEST_INTERRUPT_PRIORITY ( ( ( uint32_t ) configUNIQUE_INTERRUPT_PRIORITIES ) - 1UL ) - #define portLOWEST_USABLE_INTERRUPT_PRIORITY ( portLOWEST_INTERRUPT_PRIORITY - 1UL ) +#define portLOWEST_INTERRUPT_PRIORITY ( ( ( uint32_t ) configUNIQUE_INTERRUPT_PRIORITIES ) - 1UL ) +#define portLOWEST_USABLE_INTERRUPT_PRIORITY ( portLOWEST_INTERRUPT_PRIORITY - 1UL ) /* Architecture specific optimisations. */ - #ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION - #define configUSE_PORT_OPTIMISED_TASK_SELECTION 1 - #endif +#ifndef configUSE_PORT_OPTIMISED_TASK_SELECTION + #define configUSE_PORT_OPTIMISED_TASK_SELECTION 1 +#endif - #if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1 +#if configUSE_PORT_OPTIMISED_TASK_SELECTION == 1 /* Store/clear the ready priorities in a bit map. 
*/ - #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) ) - #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) ) + #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) ) + #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) ) /*-----------------------------------------------------------*/ - #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31 - __builtin_clz( uxReadyPriorities ) ) - - #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ - - #ifdef configASSERT - void vPortValidateInterruptPriority( void ); - #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() - #endif /* configASSERT */ + #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) uxTopPriority = ( 31 - __builtin_clz( uxReadyPriorities ) ) - #define portNOP() __asm volatile ( "NOP" ) +#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ +#ifdef configASSERT + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() +#endif /* configASSERT */ - #ifdef __cplusplus - } /* extern C */ - #endif +#define portNOP() __asm volatile ( "NOP" ) -/* The number of bits to shift for an interrupt priority is dependent on the - * number of bits implemented by the interrupt controller. */ - #if configUNIQUE_INTERRUPT_PRIORITIES == 16 - #define portPRIORITY_SHIFT 4 - #define portMAX_BINARY_POINT_VALUE 3 - #elif configUNIQUE_INTERRUPT_PRIORITIES == 32 - #define portPRIORITY_SHIFT 3 - #define portMAX_BINARY_POINT_VALUE 2 - #elif configUNIQUE_INTERRUPT_PRIORITIES == 64 - #define portPRIORITY_SHIFT 2 - #define portMAX_BINARY_POINT_VALUE 1 - #elif configUNIQUE_INTERRUPT_PRIORITIES == 128 - #define portPRIORITY_SHIFT 1 - #define portMAX_BINARY_POINT_VALUE 0 - #elif configUNIQUE_INTERRUPT_PRIORITIES == 256 - #define portPRIORITY_SHIFT 0 - #define portMAX_BINARY_POINT_VALUE 0 - #else /* if configUNIQUE_INTERRUPT_PRIORITIES == 16 */ - #error Invalid configUNIQUE_INTERRUPT_PRIORITIES setting. configUNIQUE_INTERRUPT_PRIORITIES must be set to the number of unique priorities implemented by the target hardware - #endif /* if configUNIQUE_INTERRUPT_PRIORITIES == 16 */ +/* + * The number of bits to shift for an interrupt priority is dependent on the + * number of bits implemented by the interrupt controller. + */ +#if configUNIQUE_INTERRUPT_PRIORITIES == 16 + #define portPRIORITY_SHIFT 4 + #define portMAX_BINARY_POINT_VALUE 3 +#elif configUNIQUE_INTERRUPT_PRIORITIES == 32 + #define portPRIORITY_SHIFT 3 + #define portMAX_BINARY_POINT_VALUE 2 +#elif configUNIQUE_INTERRUPT_PRIORITIES == 64 + #define portPRIORITY_SHIFT 2 + #define portMAX_BINARY_POINT_VALUE 1 +#elif configUNIQUE_INTERRUPT_PRIORITIES == 128 + #define portPRIORITY_SHIFT 1 + #define portMAX_BINARY_POINT_VALUE 0 +#elif configUNIQUE_INTERRUPT_PRIORITIES == 256 + #define portPRIORITY_SHIFT 0 + #define portMAX_BINARY_POINT_VALUE 0 +#else /* if configUNIQUE_INTERRUPT_PRIORITIES == 16 */ + #error Invalid configUNIQUE_INTERRUPT_PRIORITIES setting. configUNIQUE_INTERRUPT_PRIORITIES must be set to the number of unique priorities implemented by the target hardware +#endif /* if configUNIQUE_INTERRUPT_PRIORITIES == 16 */ /* Interrupt controller access addresses. 
*/ - #define portICCPMR_PRIORITY_MASK_OFFSET ( 0x04 ) - #define portICCIAR_INTERRUPT_ACKNOWLEDGE_OFFSET ( 0x0C ) - #define portICCEOIR_END_OF_INTERRUPT_OFFSET ( 0x10 ) - #define portICCBPR_BINARY_POINT_OFFSET ( 0x08 ) - #define portICCRPR_RUNNING_PRIORITY_OFFSET ( 0x14 ) - - #define portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET ) - #define portICCPMR_PRIORITY_MASK_REGISTER ( *( ( volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCPMR_PRIORITY_MASK_OFFSET ) ) ) - #define portICCIAR_INTERRUPT_ACKNOWLEDGE_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCIAR_INTERRUPT_ACKNOWLEDGE_OFFSET ) - #define portICCEOIR_END_OF_INTERRUPT_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCEOIR_END_OF_INTERRUPT_OFFSET ) - #define portICCPMR_PRIORITY_MASK_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCPMR_PRIORITY_MASK_OFFSET ) - #define portICCBPR_BINARY_POINT_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCBPR_BINARY_POINT_OFFSET ) ) ) - #define portICCRPR_RUNNING_PRIORITY_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCRPR_RUNNING_PRIORITY_OFFSET ) ) ) - - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portICCPMR_PRIORITY_MASK_OFFSET ( 0x04 ) +#define portICCIAR_INTERRUPT_ACKNOWLEDGE_OFFSET ( 0x0C ) +#define portICCEOIR_END_OF_INTERRUPT_OFFSET ( 0x10 ) +#define portICCBPR_BINARY_POINT_OFFSET ( 0x08 ) +#define portICCRPR_RUNNING_PRIORITY_OFFSET ( 0x14 ) + +#define portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + configINTERRUPT_CONTROLLER_CPU_INTERFACE_OFFSET ) +#define portICCPMR_PRIORITY_MASK_REGISTER ( *( ( volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCPMR_PRIORITY_MASK_OFFSET ) ) ) +#define portICCIAR_INTERRUPT_ACKNOWLEDGE_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCIAR_INTERRUPT_ACKNOWLEDGE_OFFSET ) +#define portICCEOIR_END_OF_INTERRUPT_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCEOIR_END_OF_INTERRUPT_OFFSET ) +#define portICCPMR_PRIORITY_MASK_REGISTER_ADDRESS ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCPMR_PRIORITY_MASK_OFFSET ) +#define portICCBPR_BINARY_POINT_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCBPR_BINARY_POINT_OFFSET ) ) ) +#define portICCRPR_RUNNING_PRIORITY_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCRPR_RUNNING_PRIORITY_OFFSET ) ) ) + +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) + +/* *INDENT-OFF* */ +#ifdef __cplusplus + } /* extern C */ +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ From 309a18a03adab1eaca5d86654dd4e1075ee49e7a Mon Sep 17 00:00:00 2001 From: Kody Stribrny <89810515+kstribrnAmzn@users.noreply.github.com> Date: Mon, 6 Mar 2023 20:04:15 -0800 Subject: [PATCH 23/62] Fix freertos_kernel cmake property, Posix Port (#640) * Fix freertos_kernel cmake property, Posix Port * Moves the `set_property()` call below the target definition in top level CMakeLists file * Corrects billion value from `ULL` suffix (not C90 compliant) to `UL` suffix with cast to uint64_t * Add blank line to CMakeLists.txt --- CMakeLists.txt | 8 ++++---- portable/ThirdParty/GCC/Posix/port.c 
| 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b51b7aaacdd..6d83db88b9c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -228,10 +228,6 @@ elseif((FREERTOS_PORT STREQUAL "A_CUSTOM_PORT") AND (NOT TARGET freertos_kernel_ " freertos_kernel)") endif() -######################################################################## -# Requirements -set_property(TARGET freertos_kernel PROPERTY C_STANDARD 90) - ######################################################################## # Overall Compile Options # Note the compile option strategy is to error on everything and then @@ -294,3 +290,7 @@ target_link_libraries(freertos_kernel $<$:freertos_config> freertos_kernel_port ) + +######################################################################## +# Requirements +set_property(TARGET freertos_kernel PROPERTY C_STANDARD 90) diff --git a/portable/ThirdParty/GCC/Posix/port.c b/portable/ThirdParty/GCC/Posix/port.c index 12076b9898d..d634c8b264e 100644 --- a/portable/ThirdParty/GCC/Posix/port.c +++ b/portable/ThirdParty/GCC/Posix/port.c @@ -343,7 +343,7 @@ static uint64_t prvGetTimeNs( void ) clock_gettime( CLOCK_MONOTONIC, &t ); - return ( uint64_t )t.tv_sec * 1000000000ULL + ( uint64_t )t.tv_nsec; + return ( uint64_t )t.tv_sec * ( uint64_t )1000000000UL + ( uint64_t )t.tv_nsec; } static uint64_t prvStartTimeNs; From 55658e15250132fd5671dedbbc43bd69eab3b3d9 Mon Sep 17 00:00:00 2001 From: Holden Date: Sat, 11 Mar 2023 12:34:15 -0500 Subject: [PATCH 24/62] Add missing FreeRTOS+ defines --- include/projdefs.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/projdefs.h b/include/projdefs.h index 67fc5351937..c81ad568436 100644 --- a/include/projdefs.h +++ b/include/projdefs.h @@ -44,6 +44,10 @@ typedef void (* TaskFunction_t)( void * ); #define pdFALSE ( ( BaseType_t ) 0 ) #define pdTRUE ( ( BaseType_t ) 1 ) +#define pdFALSE_SIGNED ( ( BaseType_t ) 0 ) +#define pdTRUE_SIGNED ( ( BaseType_t ) 1 ) +#define pdFALSE_UNSIGNED ( ( UBaseType_t ) 0 ) +#define pdTRUE_UNSIGNED ( ( UBaseType_t ) 1 ) #define pdPASS ( pdTRUE ) #define pdFAIL ( pdFALSE ) @@ -100,6 +104,7 @@ typedef void (* TaskFunction_t)( void * ); #define pdFREERTOS_ERRNO_ENOTEMPTY 90 /* Directory not empty */ #define pdFREERTOS_ERRNO_ENAMETOOLONG 91 /* File or path name too long */ #define pdFREERTOS_ERRNO_EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ +#define pdFREERTOS_ERRNO_EAFNOSUPPORT 97 /* Address family not supported by protocol */ #define pdFREERTOS_ERRNO_ENOBUFS 105 /* No buffer space available */ #define pdFREERTOS_ERRNO_ENOPROTOOPT 109 /* Protocol not available */ #define pdFREERTOS_ERRNO_EADDRINUSE 112 /* Address already in use */ From d4d5e43292a57df354dcf8f33d938283f2082855 Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Fri, 17 Mar 2023 08:22:34 +0530 Subject: [PATCH 25/62] Run kernel demos and unit tests for PR changes (#645) * Run kernel demos and unit tests for PR changes Kernel demos check builds multiple demos from FreeRTOS/FreeRTOS and unit tests check runs unit tests in FreeRTOS/FreeRTOS. Both of these checks currently use main branch of FreeRTOS-Kernel. This commits updates these checks to use the changes in the PR. 
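
For illustration, each updated job replaces the kernel submodule fetch with a
checkout of the pull request source into FreeRTOS/Source, using a step of the
following form (taken verbatim from the workflow changes in this patch):

      # Checkout user pull request changes
      - name: Checkout Pull Request
        uses: actions/checkout@v2
        with:
          path: ./FreeRTOS/Source
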
Signed-off-by: Gaurav Aggarwal * Do not specify PR SHA explicitly as that is default Signed-off-by: Gaurav Aggarwal * Remove explicit PR SHA from kernel checks Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Gaurav Aggarwal --- .github/workflows/kernel-checks.yml | 1 - .github/workflows/kernel-demos.yml | 42 +++++++++++++++++------------ .github/workflows/unit-tests.yml | 4 ++- 3 files changed, 28 insertions(+), 19 deletions(-) diff --git a/.github/workflows/kernel-checks.yml b/.github/workflows/kernel-checks.yml index b295033d28a..a4627f0e07c 100644 --- a/.github/workflows/kernel-checks.yml +++ b/.github/workflows/kernel-checks.yml @@ -28,7 +28,6 @@ jobs: - name: Checkout Pull Request uses: actions/checkout@v2 with: - ref: ${{ github.event.pull_request.head.sha }} path: inspect # Collect all affected files diff --git a/.github/workflows/kernel-demos.yml b/.github/workflows/kernel-demos.yml index d6eace575ae..96a4c59759e 100644 --- a/.github/workflows/kernel-demos.yml +++ b/.github/workflows/kernel-demos.yml @@ -14,10 +14,11 @@ jobs: submodules: 'recursive' fetch-depth: 1 - - name: Fetch Kernel Submodule - shell: bash - run: | - git submodule update --checkout --init --depth 1 FreeRTOS/Source + # Checkout user pull request changes + - name: Checkout Pull Request + uses: actions/checkout@v2 + with: + path: ./FreeRTOS/Source - name: Add msbuild to PATH uses: microsoft/setup-msbuild@v1.1 @@ -42,10 +43,11 @@ jobs: submodules: 'recursive' fetch-depth: 1 - - name: Fetch Kernel Submodule - shell: bash - run: | - git submodule update --checkout --init --depth 1 FreeRTOS/Source + # Checkout user pull request changes + - name: Checkout Pull Request + uses: actions/checkout@v2 + with: + path: ./FreeRTOS/Source - name: Build WIN32-MingW Demo working-directory: FreeRTOS/Demo/WIN32-MingW @@ -66,9 +68,11 @@ jobs: submodules: 'recursive' fetch-depth: 1 - - name: Fetch Kernel Submodule - shell: bash - run: git submodule update --checkout --init --depth 1 FreeRTOS/Source + # Checkout user pull request changes + - name: Checkout Pull Request + uses: actions/checkout@v2 + with: + path: ./FreeRTOS/Source - name: Install GCC shell: bash @@ -93,9 +97,11 @@ jobs: submodules: 'recursive' fetch-depth: 1 - - name: Fetch Kernel Submodule - shell: bash - run: git submodule update --checkout --init --depth 1 FreeRTOS/Source + # Checkout user pull request changes + - name: Checkout Pull Request + uses: actions/checkout@v2 + with: + path: ./FreeRTOS/Source - name: Install MSP430 Toolchain shell: bash @@ -120,9 +126,11 @@ jobs: submodules: 'recursive' fetch-depth: 1 - - name: Fetch Kernel Submodule - shell: bash - run: git submodule update --checkout --init --depth 1 FreeRTOS/Source + # Checkout user pull request changes + - name: Checkout Pull Request + uses: actions/checkout@v2 + with: + path: ./FreeRTOS/Source - name: Install GNU ARM Toolchain shell: bash diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index a714b757256..73e1808fa25 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -12,7 +12,9 @@ jobs: repository: FreeRTOS/FreeRTOS submodules: 'recursive' fetch-depth: 1 - - name: Clone This Repo + + # Checkout user pull request changes + - name: Checkout Pull Request uses: actions/checkout@v2 with: path: ./FreeRTOS/Source From 9488ba22d833f2c0746903dfc312b5add887ec8b Mon Sep 17 00:00:00 2001 From: Darian <32921628+Dazza0@users.noreply.github.com> Date: Thu, 23 Mar 2023 06:27:57 +0800 Subject: [PATCH 26/62] Add functions to get the buffers of 
statically created objects (#641) Added various ...GetStaticBuffer() functions to get the buffers of statically created objects. --------- Co-authored-by: Paul Bartell Co-authored-by: Nikhil Kamath <110539926+amazonKamath@users.noreply.github.com> Co-authored-by: Gaurav Aggarwal --- .github/lexicon.txt | 19 ++++++++++++++++ event_groups.c | 36 +++++++++++++++++++++++++++++ include/event_groups.h | 22 ++++++++++++++++++ include/message_buffer.h | 31 +++++++++++++++++++++++++ include/queue.h | 41 +++++++++++++++++++++++++++++++++ include/semphr.h | 21 +++++++++++++++++ include/stream_buffer.h | 32 ++++++++++++++++++++++++++ include/task.h | 30 ++++++++++++++++++++++++ include/timers.h | 20 ++++++++++++++++ queue.c | 49 ++++++++++++++++++++++++++++++++++++++++ stream_buffer.c | 28 +++++++++++++++++++++++ tasks.c | 47 ++++++++++++++++++++++++++++++++++++++ timers.c | 24 ++++++++++++++++++++ 13 files changed, 400 insertions(+) diff --git a/.github/lexicon.txt b/.github/lexicon.txt index 47d2349bfff..e7ded970dbd 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -1464,13 +1464,24 @@ ppdc ppio ppitc ppmc +ppucmessagebufferstoragearea +ppucqueuestorage +ppucstreambufferstoragearea ppudr ppuer ppusr +ppuxstackbuffer ppvdestination ppwm +ppxeventgroupbuffer ppxidletaskstackbuffer ppxidletasktcbbuffer +ppxsemaphorebuffer +ppxstaticmessagebuffer +ppxstaticqueue +ppxstaticstreambuffer +ppxtaskbuffer +ppxtimerbuffer ppxtimertaskstackbuffer ppxtimertasktcbbuffer pr @@ -2723,6 +2734,7 @@ xeventgroupcreatestatic xeventgroupdelete xeventgroupgetbits xeventgroupgetbitsfromisr +xeventgroupgetstaticbuffer xeventgroupsetbits xeventgroupsetbitsfromisr xeventgroupsync @@ -2796,6 +2808,7 @@ xmessage xmessagebuffer xmessagebuffercreate xmessagebuffercreatestatic +xmessagebuffergetstaticbuffers xmessagebufferisempty xmessagebufferisfull xmessagebuffernextlengthbytes @@ -2865,6 +2878,7 @@ xqueuecreatestatic xqueuegenericsend xqueuegenericsendfromisr xqueuegetmutexholder +xqueuegetstaticbuffers xqueuegivefromisr xqueuegivemutexrecursive xqueueorsemaphore @@ -2919,6 +2933,7 @@ xsemaphorecreaterecursivemutex xsemaphorecreaterecursivemutexstatic xsemaphoregetmutexholder xsemaphoregetmutexholderfromisr +xsemaphoregetstaticbuffer xsemaphoregive xsemaphoregivefromisr xsemaphoregivemutexrecursive @@ -2943,6 +2958,7 @@ xstreambuffer xstreambufferbytesavailable xstreambuffercreate xstreambuffercreatestatic +xstreambuffergetstaticbuffers xstreambufferisempty xstreambufferisfull xstreambuffernextmessagelengthbytes @@ -2981,6 +2997,7 @@ xtaskgetcurrenttaskhandle xtaskgethandle xtaskgetidletaskhandle xtaskgetschedulerstate +xtaskgetstaticbuffers xtaskgettickcount xtaskgettickcountfromisr xtaskhandle @@ -3048,6 +3065,7 @@ xtimerdelete xtimergetexpirytime xtimergetperiod xtimergetreloadmode +xtimergetstaticbuffer xtimergettimerdaemontaskhandle xtimeristimeractive xtimerlistitem @@ -3081,3 +3099,4 @@ xwritevalue xxr xyieldpending xzr + diff --git a/event_groups.c b/event_groups.c index 7c86e605a88..5c4b4297baf 100644 --- a/event_groups.c +++ b/event_groups.c @@ -677,6 +677,42 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup ) } /*-----------------------------------------------------------*/ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup, + StaticEventGroup_t ** ppxEventGroupBuffer ) + { + BaseType_t xReturn; + EventGroup_t * pxEventBits = xEventGroup; + + configASSERT( pxEventBits ); + configASSERT( ppxEventGroupBuffer ); + + #if ( 
configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* Check if the event group was statically allocated. */ + if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdTRUE ) + { + *ppxEventGroupBuffer = ( StaticEventGroup_t * ) pxEventBits; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + #else /* configSUPPORT_DYNAMIC_ALLOCATION */ + { + /* Event group must have been statically allocated. */ + *ppxEventGroupBuffer = ( StaticEventGroup_t * ) pxEventBits; + xReturn = pdTRUE; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + + return xReturn; + } +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + /* For internal use only - execute a 'set bits' command that was pended from * an interrupt. */ void vEventGroupSetBitsCallback( void * pvEventGroup, diff --git a/include/event_groups.h b/include/event_groups.h index 3f37d90be5d..47572ce94e4 100644 --- a/include/event_groups.h +++ b/include/event_groups.h @@ -763,6 +763,28 @@ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEG */ void vEventGroupDelete( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION; +/** + * event_groups.h + * @code{c} + * BaseType_t xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup, + * StaticEventGroup_t ** ppxEventGroupBuffer ); + * @endcode + * + * Retrieve a pointer to a statically created event groups's data structure + * buffer. It is the same buffer that is supplied at the time of creation. + * + * @param xEventGroup The event group for which to retrieve the buffer. + * + * @param ppxEventGroupBuffer Used to return a pointer to the event groups's + * data structure buffer. + * + * @return pdTRUE if the buffer was retrieved, pdFALSE otherwise. + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup, + StaticEventGroup_t ** ppxEventGroupBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + /* For internal use only. */ void vEventGroupSetBitsCallback( void * pvEventGroup, const uint32_t ulBitsToSet ) PRIVILEGED_FUNCTION; diff --git a/include/message_buffer.h b/include/message_buffer.h index b56dd35964e..74fab118f51 100644 --- a/include/message_buffer.h +++ b/include/message_buffer.h @@ -245,6 +245,37 @@ typedef StreamBufferHandle_t MessageBufferHandle_t; xStreamBufferGenericCreateStatic( ( xBufferSizeBytes ), 0, pdTRUE, ( pucMessageBufferStorageArea ), ( pxStaticMessageBuffer ), ( pxSendCompletedCallback ), ( pxReceiveCompletedCallback ) ) #endif +/** + * message_buffer.h + * + * @code{c} + * BaseType_t xMessageBufferGetStaticBuffers( MessageBufferHandle_t xMessageBuffer, + * uint8_t ** ppucMessageBufferStorageArea, + * StaticMessageBuffer_t ** ppxStaticMessageBuffer ); + * @endcode + * + * Retrieve pointers to a statically created message buffer's data structure + * buffer and storage area buffer. These are the same buffers that are supplied + * at the time of creation. + * + * @param xMessageBuffer The message buffer for which to retrieve the buffers. + * + * @param ppucMessageBufferStorageArea Used to return a pointer to the + * message buffer's storage area buffer. + * + * @param ppxStaticMessageBuffer Used to return a pointer to the message + * buffer's data structure buffer. + * + * @return pdTRUE if buffers were retrieved, pdFALSE otherwise.. 
+ * + * \defgroup xMessageBufferGetStaticBuffers xMessageBufferGetStaticBuffers + * \ingroup MessageBufferManagement + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xMessageBufferGetStaticBuffers( xMessageBuffer, ppucMessageBufferStorageArea, ppxStaticMessageBuffer ) \ + xStreamBufferGetStaticBuffers( ( xMessageBuffer ), ( ppucMessageBufferStorageArea ), ( ppxStaticMessageBuffer ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + /** * message_buffer.h * diff --git a/include/queue.h b/include/queue.h index f488a2e7641..66c8286aef0 100644 --- a/include/queue.h +++ b/include/queue.h @@ -235,6 +235,35 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t; #define xQueueCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxQueueBuffer ) xQueueGenericCreateStatic( ( uxQueueLength ), ( uxItemSize ), ( pucQueueStorage ), ( pxQueueBuffer ), ( queueQUEUE_TYPE_BASE ) ) #endif /* configSUPPORT_STATIC_ALLOCATION */ +/** + * queue. h + * @code{c} + * BaseType_t xQueueGetStaticBuffers( QueueHandle_t xQueue, + * uint8_t ** ppucQueueStorage, + * StaticQueue_t ** ppxStaticQueue ); + * @endcode + * + * Retrieve pointers to a statically created queue's data structure buffer + * and storage area buffer. These are the same buffers that are supplied + * at the time of creation. + * + * @param xQueue The queue for which to retrieve the buffers. + * + * @param ppucQueueStorage Used to return a pointer to the queue's storage + * area buffer. + * + * @param ppxStaticQueue Used to return a pointer to the queue's data + * structure buffer. + * + * @return pdTRUE if buffers were retrieved, pdFALSE otherwise. + * + * \defgroup xQueueGetStaticBuffers xQueueGetStaticBuffers + * \ingroup QueueManagement + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xQueueGetStaticBuffers( xQueue, ppucQueueStorage, ppxStaticQueue ) xQueueGenericGetStaticBuffers( ( xQueue ), ( ppucQueueStorage ), ( ppxStaticQueue ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + /** * queue. h * @code{c} @@ -1542,6 +1571,18 @@ BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex ) PRIVILEGED_FUNCTION; const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; #endif +/* + * Generic version of the function used to retrieve the buffers of statically + * created queues. This is called by other functions and macros that retrieve + * the buffers of other statically created RTOS objects that use the queue + * structure as their base. + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xQueueGenericGetStaticBuffers( QueueHandle_t xQueue, + uint8_t ** ppucQueueStorage, + StaticQueue_t ** ppxStaticQueue ) PRIVILEGED_FUNCTION; +#endif + /* * Queue sets provide a mechanism to allow a task to block (pend) on a read * operation from multiple queues or semaphores simultaneously. diff --git a/include/semphr.h b/include/semphr.h index c2206fa98ff..7977a01b84e 100644 --- a/include/semphr.h +++ b/include/semphr.h @@ -1190,4 +1190,25 @@ typedef QueueHandle_t SemaphoreHandle_t; */ #define uxSemaphoreGetCountFromISR( xSemaphore ) uxQueueMessagesWaitingFromISR( ( QueueHandle_t ) ( xSemaphore ) ) +/** + * semphr.h + * @code{c} + * BaseType_t xSemaphoreGetStaticBuffer( SemaphoreHandle_t xSemaphore ); + * @endcode + * + * Retrieve pointer to a statically created binary semaphore, counting semaphore, + * or mutex semaphore's data structure buffer. This is the same buffer that is + * supplied at the time of creation. + * + * @param xSemaphore The semaphore for which to retrieve the buffer. 
+ * + * @param ppxSemaphoreBuffer Used to return a pointer to the semaphore's + * data structure buffer. + * + * @return pdTRUE if buffer was retrieved, pdFALSE otherwise. + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + #define xSemaphoreGetStaticBuffer( xSemaphore, ppxSemaphoreBuffer ) xQueueGenericGetStaticBuffers( ( QueueHandle_t ) ( xSemaphore ), NULL, ( ppxSemaphoreBuffer ) ) +#endif /* configSUPPORT_STATIC_ALLOCATION */ + #endif /* SEMAPHORE_H */ diff --git a/include/stream_buffer.h b/include/stream_buffer.h index d65ed9e9cec..521c178ef5a 100644 --- a/include/stream_buffer.h +++ b/include/stream_buffer.h @@ -260,6 +260,38 @@ typedef void (* StreamBufferCallbackFunction_t)( StreamBufferHandle_t xStreamBuf xStreamBufferGenericCreateStatic( ( xBufferSizeBytes ), ( xTriggerLevelBytes ), pdFALSE, ( pucStreamBufferStorageArea ), ( pxStaticStreamBuffer ), ( pxSendCompletedCallback ), ( pxReceiveCompletedCallback ) ) #endif +/** + * stream_buffer.h + * + * @code{c} + * BaseType_t xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffer, + * uint8_t ** ppucStreamBufferStorageArea, + * StaticStreamBuffer_t ** ppxStaticStreamBuffer ); + * @endcode + * + * Retrieve pointers to a statically created stream buffer's data structure + * buffer and storage area buffer. These are the same buffers that are supplied + * at the time of creation. + * + * @param xStreamBuffer The stream buffer for which to retrieve the buffers. + * + * @param ppucStreamBufferStorageArea Used to return a pointer to the stream + * buffer's storage area buffer. + * + * @param ppxStaticStreamBuffer Used to return a pointer to the stream + * buffer's data structure buffer. + * + * @return pdTRUE if buffers were retrieved, pdFALSE otherwise. + * + * \defgroup xStreamBufferGetStaticBuffers xStreamBufferGetStaticBuffers + * \ingroup StreamBufferManagement + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffer, + uint8_t ** ppucStreamBufferStorageArea, + StaticStreamBuffer_t ** ppxStaticStreamBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + /** * stream_buffer.h * diff --git a/include/task.h b/include/task.h index 08c14dd6ff1..054f43644a1 100644 --- a/include/task.h +++ b/include/task.h @@ -1509,6 +1509,36 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; /*lint !e */ TaskHandle_t xTaskGetHandle( const char * pcNameToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +/** + * task. h + * @code{c} + * BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask, + * StackType_t ** ppuxStackBuffer, + * StaticTask_t ** ppxTaskBuffer ); + * @endcode + * + * Retrieve pointers to a statically created task's data structure + * buffer and stack buffer. These are the same buffers that are supplied + * at the time of creation. + * + * @param xTask The task for which to retrieve the buffers. + * + * @param ppuxStackBuffer Used to return a pointer to the task's stack buffer. + * + * @param ppxTaskBuffer Used to return a pointer to the task's data structure + * buffer. + * + * @return pdTRUE if buffers were retrieved, pdFALSE otherwise. 
+ * + * \defgroup xTaskGetStaticBuffers xTaskGetStaticBuffers + * \ingroup TaskUtils + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask, + StackType_t ** ppuxStackBuffer, + StaticTask_t ** ppxTaskBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + /** * task.h * @code{c} diff --git a/include/timers.h b/include/timers.h index 6a064d62a41..2967a4674c7 100644 --- a/include/timers.h +++ b/include/timers.h @@ -1323,6 +1323,26 @@ TickType_t xTimerGetPeriod( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; */ TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; +/** + * BaseType_t xTimerGetStaticBuffer( TimerHandle_t xTimer, + * StaticTimer_t ** ppxTimerBuffer ); + * + * Retrieve pointer to a statically created timer's data structure + * buffer. This is the same buffer that is supplied at the time of + * creation. + * + * @param xTimer The timer for which to retrieve the buffer. + * + * @param ppxTaskBuffer Used to return a pointer to the timers's data + * structure buffer. + * + * @return pdTRUE if the buffer was retrieved, pdFALSE otherwise. + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xTimerGetStaticBuffer( TimerHandle_t xTimer, + StaticTimer_t ** ppxTimerBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + /* * Functions beyond this part are not part of the public API and are intended * for use by the kernel only. diff --git a/queue.c b/queue.c index ca6bfbc60dc..662052f5ef6 100644 --- a/queue.c +++ b/queue.c @@ -419,6 +419,55 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue, #endif /* configSUPPORT_STATIC_ALLOCATION */ /*-----------------------------------------------------------*/ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + BaseType_t xQueueGenericGetStaticBuffers( QueueHandle_t xQueue, + uint8_t ** ppucQueueStorage, + StaticQueue_t ** ppxStaticQueue ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + configASSERT( pxQueue ); + configASSERT( ppxStaticQueue ); + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + { + /* Check if the queue was statically allocated. */ + if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdTRUE ) + { + if( ppucQueueStorage != NULL ) + { + *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead; + } + + *ppxStaticQueue = ( StaticQueue_t * ) pxQueue; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + #else /* configSUPPORT_DYNAMIC_ALLOCATION */ + { + /* Queue must have been statically allocated. 
*/ + if( ppucQueueStorage != NULL ) + { + *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead; + } + + *ppxStaticQueue = ( StaticQueue_t * ) pxQueue; + xReturn = pdTRUE; + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + + return xReturn; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, diff --git a/stream_buffer.c b/stream_buffer.c index 0d0b3350acd..938c4051cec 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -472,6 +472,34 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, #endif /* ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ /*-----------------------------------------------------------*/ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffer, + uint8_t ** ppucStreamBufferStorageArea, + StaticStreamBuffer_t ** ppxStaticStreamBuffer ) + { + BaseType_t xReturn; + const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; + + configASSERT( pxStreamBuffer ); + configASSERT( ppucStreamBufferStorageArea ); + configASSERT( ppxStaticStreamBuffer ); + + if( ( pxStreamBuffer->ucFlags & sbFLAGS_IS_STATICALLY_ALLOCATED ) != ( uint8_t ) 0 ) + { + *ppucStreamBufferStorageArea = pxStreamBuffer->pucBuffer; + *ppxStaticStreamBuffer = ( StaticStreamBuffer_t * ) pxStreamBuffer; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; + } +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) { StreamBuffer_t * pxStreamBuffer = xStreamBuffer; diff --git a/tasks.c b/tasks.c index 0e7a56c6058..befd63e2279 100644 --- a/tasks.c +++ b/tasks.c @@ -2489,6 +2489,53 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char #endif /* INCLUDE_xTaskGetHandle */ /*-----------------------------------------------------------*/ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask, + StackType_t ** ppuxStackBuffer, + StaticTask_t ** ppxTaskBuffer ) + { + BaseType_t xReturn; + TCB_t * pxTCB; + + configASSERT( ppuxStackBuffer != NULL ); + configASSERT( ppxTaskBuffer != NULL ); + + pxTCB = prvGetTCBFromHandle( xTask ); + + #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 ) + { + if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB ) + { + *ppuxStackBuffer = pxTCB->pxStack; + *ppxTaskBuffer = ( StaticTask_t * ) pxTCB; + xReturn = pdTRUE; + } + else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY ) + { + *ppuxStackBuffer = pxTCB->pxStack; + *ppxTaskBuffer = NULL; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + } + #else /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */ + { + *ppuxStackBuffer = pxTCB->pxStack; + *ppxTaskBuffer = ( StaticTask_t * ) pxTCB; + xReturn = pdTRUE; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */ + + return xReturn; + } + +#endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + #if ( configUSE_TRACE_FACILITY == 1 ) UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, diff --git a/timers.c b/timers.c index 06a29279597..76a59a8b999 100644 --- a/timers.c +++ b/timers.c @@ -510,6 +510,30 @@ } 
/*-----------------------------------------------------------*/ + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + BaseType_t xTimerGetStaticBuffer( TimerHandle_t xTimer, + StaticTimer_t ** ppxTimerBuffer ) + { + BaseType_t xReturn; + Timer_t * pxTimer = xTimer; + + configASSERT( ppxTimerBuffer != NULL ); + + if( ( pxTimer->ucStatus & tmrSTATUS_IS_STATICALLY_ALLOCATED ) != 0 ) + { + *ppxTimerBuffer = ( StaticTimer_t * ) pxTimer; + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + const char * pcTimerGetName( TimerHandle_t xTimer ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ { Timer_t * pxTimer = xTimer; From 99797e14e3c041ce520682694e27533bfafb0cb0 Mon Sep 17 00:00:00 2001 From: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> Date: Thu, 23 Mar 2023 15:06:33 +0530 Subject: [PATCH 27/62] Cortex-M Assert when NVIC implements 8 PRIO bits (#639) * Cortex-M Assert when NVIC implements 8 PRIO bits * Fix CM3 ports * Fix ARM_CM3_MPU * Fix ARM CM3 * Fix ARM_CM4_MPU * Fix ARM_CM4 * Fix GCC ARM_CM7 * Fix IAR ARM ports * Uncrustify changes * Fix MikroC_ARM_CM4F port * Fix MikroC_ARM_CM4F port-(2) * Fix RVDS ARM ports * Revert changes for Tasking/ARM_CM4F port * Revert changes for Tasking/ARM_CM4F port-(2) * Update port.c Fix GCC/ARM_CM4F port * Update port.c * update GCC\ARM_CM4F port * update port.c * Assert to check configMAX_SYSCALL_INTERRUPT_PRIORITY is set to higher priority * Fix merge error: remove duplicate code * Fix typos --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Co-authored-by: Ubuntu --- portable/CCS/ARM_CM3/port.c | 63 +++++-- portable/CCS/ARM_CM4F/port.c | 63 +++++-- portable/GCC/ARM_CM3/port.c | 37 +++- portable/GCC/ARM_CM3_MPU/port.c | 37 +++- portable/GCC/ARM_CM4F/port.c | 73 +++++--- portable/GCC/ARM_CM4_MPU/port.c | 37 +++- portable/GCC/ARM_CM7/r0p1/port.c | 77 +++++--- portable/IAR/ARM_CM3/port.c | 35 +++- portable/IAR/ARM_CM4F/port.c | 35 +++- portable/IAR/ARM_CM4F_MPU/port.c | 293 +++++++++++++++++------------- portable/IAR/ARM_CM7/r0p1/port.c | 35 +++- portable/MikroC/ARM_CM4F/port.c | 35 +++- portable/RVDS/ARM_CM3/port.c | 35 +++- portable/RVDS/ARM_CM4F/port.c | 35 +++- portable/RVDS/ARM_CM4_MPU/port.c | 243 ++++++++++++++----------- portable/RVDS/ARM_CM7/r0p1/port.c | 35 +++- portable/Tasking/ARM_CM4F/port.c | 3 +- 17 files changed, 805 insertions(+), 366 deletions(-) mode change 100644 => 100755 portable/CCS/ARM_CM3/port.c mode change 100644 => 100755 portable/CCS/ARM_CM4F/port.c mode change 100644 => 100755 portable/GCC/ARM_CM3/port.c mode change 100644 => 100755 portable/GCC/ARM_CM3_MPU/port.c mode change 100644 => 100755 portable/GCC/ARM_CM4F/port.c mode change 100644 => 100755 portable/GCC/ARM_CM4_MPU/port.c mode change 100644 => 100755 portable/GCC/ARM_CM7/r0p1/port.c mode change 100644 => 100755 portable/IAR/ARM_CM3/port.c mode change 100644 => 100755 portable/IAR/ARM_CM4F/port.c mode change 100644 => 100755 portable/IAR/ARM_CM4F_MPU/port.c mode change 100644 => 100755 portable/IAR/ARM_CM7/r0p1/port.c mode change 100644 => 100755 portable/MikroC/ARM_CM4F/port.c mode change 100644 => 100755 portable/RVDS/ARM_CM3/port.c mode change 100644 => 100755 portable/RVDS/ARM_CM4F/port.c mode change 100644 => 100755 portable/RVDS/ARM_CM4_MPU/port.c mode change 100644 => 100755 portable/RVDS/ARM_CM7/r0p1/port.c mode change 100644 => 
100755 portable/Tasking/ARM_CM4F/port.c diff --git a/portable/CCS/ARM_CM3/port.c b/portable/CCS/ARM_CM3/port.c old mode 100644 new mode 100755 index dfdb240ed2b..63dc600a756 --- a/portable/CCS/ARM_CM3/port.c +++ b/portable/CCS/ARM_CM3/port.c @@ -219,6 +219,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -250,20 +251,46 @@ BaseType_t xPortStartScheduler( void ) /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -272,7 +299,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -379,9 +406,9 @@ void xPortSysTickHandler( void ) /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ - __asm( " cpsid i"); - __asm( " dsb"); - __asm( " isb"); + __asm( " cpsid i" ); + __asm( " dsb" ); + __asm( " isb" ); /* If a context switch is pending or a task is waiting for the scheduler * to be unsuspended then abandon the low power entry. */ @@ -389,7 +416,7 @@ void xPortSysTickHandler( void ) { /* Re-enable interrupts - see comments above the cpsid instruction * above. 
*/ - __asm( " cpsie i"); + __asm( " cpsie i" ); } else { @@ -450,9 +477,9 @@ void xPortSysTickHandler( void ) if( xModifiableIdleTime > 0 ) { - __asm( " dsb"); - __asm( " wfi"); - __asm( " isb"); + __asm( " dsb" ); + __asm( " wfi" ); + __asm( " isb" ); } configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); @@ -460,17 +487,17 @@ void xPortSysTickHandler( void ) /* Re-enable interrupts to allow the interrupt that brought the MCU * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ - __asm( " cpsie i"); - __asm( " dsb"); - __asm( " isb"); + __asm( " cpsie i" ); + __asm( " dsb" ); + __asm( " isb" ); /* Disable interrupts again because the clock is about to be stopped * and interrupts that execute while the clock is stopped will increase * any slippage between the time maintained by the RTOS and calendar * time. */ - __asm( " cpsid i"); - __asm( " dsb"); - __asm( " isb"); + __asm( " cpsid i" ); + __asm( " dsb" ); + __asm( " isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the @@ -578,7 +605,7 @@ void xPortSysTickHandler( void ) vTaskStepTick( ulCompleteTickPeriods ); /* Exit with interrupts enabled. */ - __asm( " cpsie i"); + __asm( " cpsie i" ); } } diff --git a/portable/CCS/ARM_CM4F/port.c b/portable/CCS/ARM_CM4F/port.c old mode 100644 new mode 100755 index 360e83d7000..a741b091561 --- a/portable/CCS/ARM_CM4F/port.c +++ b/portable/CCS/ARM_CM4F/port.c @@ -238,6 +238,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -269,20 +270,46 @@ BaseType_t xPortStartScheduler( void ) /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. 
*/ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -291,7 +318,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -404,9 +431,9 @@ void xPortSysTickHandler( void ) /* Enter a critical section but don't use the taskENTER_CRITICAL() * method as that will mask interrupts that should exit sleep mode. */ - __asm( " cpsid i"); - __asm( " dsb"); - __asm( " isb"); + __asm( " cpsid i" ); + __asm( " dsb" ); + __asm( " isb" ); /* If a context switch is pending or a task is waiting for the scheduler * to be unsuspended then abandon the low power entry. */ @@ -414,7 +441,7 @@ void xPortSysTickHandler( void ) { /* Re-enable interrupts - see comments above the cpsid instruction * above. */ - __asm( " cpsie i"); + __asm( " cpsie i" ); } else { @@ -475,9 +502,9 @@ void xPortSysTickHandler( void ) if( xModifiableIdleTime > 0 ) { - __asm( " dsb"); - __asm( " wfi"); - __asm( " isb"); + __asm( " dsb" ); + __asm( " wfi" ); + __asm( " isb" ); } configPOST_SLEEP_PROCESSING( xExpectedIdleTime ); @@ -485,17 +512,17 @@ void xPortSysTickHandler( void ) /* Re-enable interrupts to allow the interrupt that brought the MCU * out of sleep mode to execute immediately. See comments above * the cpsid instruction above. */ - __asm( " cpsie i"); - __asm( " dsb"); - __asm( " isb"); + __asm( " cpsie i" ); + __asm( " dsb" ); + __asm( " isb" ); /* Disable interrupts again because the clock is about to be stopped * and interrupts that execute while the clock is stopped will increase * any slippage between the time maintained by the RTOS and calendar * time. */ - __asm( " cpsid i"); - __asm( " dsb"); - __asm( " isb"); + __asm( " cpsid i" ); + __asm( " dsb" ); + __asm( " isb" ); /* Disable the SysTick clock without reading the * portNVIC_SYSTICK_CTRL_REG register to ensure the @@ -603,7 +630,7 @@ void xPortSysTickHandler( void ) vTaskStepTick( ulCompleteTickPeriods ); /* Exit with interrupts enabled. */ - __asm( " cpsie i"); + __asm( " cpsie i" ); } } diff --git a/portable/GCC/ARM_CM3/port.c b/portable/GCC/ARM_CM3/port.c old mode 100644 new mode 100755 index a5cea1e1e1b..57337e60502 --- a/portable/GCC/ARM_CM3/port.c +++ b/portable/GCC/ARM_CM3/port.c @@ -262,6 +262,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -293,20 +294,46 @@ BaseType_t xPortStartScheduler( void ) /* Calculate the maximum acceptable priority group value for the number * of bits read back. 
*/ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -315,7 +342,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -756,4 +783,4 @@ __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); } -#endif /* configASSERT_DEFINED */ +#endif /* configASSERT_DEFINED */ \ No newline at end of file diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c old mode 100644 new mode 100755 index 5f7d2c81113..4b21236e785 --- a/portable/GCC/ARM_CM3_MPU/port.c +++ b/portable/GCC/ARM_CM3_MPU/port.c @@ -385,6 +385,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -416,20 +417,46 @@ BaseType_t xPortStartScheduler( void ) /* Calculate the maximum acceptable priority group value for the number * of bits read back. 
*/ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -438,7 +465,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -923,4 +950,4 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } #endif /* configASSERT_DEFINED */ -/*-----------------------------------------------------------*/ +/*-----------------------------------------------------------*/ \ No newline at end of file diff --git a/portable/GCC/ARM_CM4F/port.c b/portable/GCC/ARM_CM4F/port.c old mode 100644 new mode 100755 index b978f0af54c..84e79130487 --- a/portable/GCC/ARM_CM4F/port.c +++ b/portable/GCC/ARM_CM4F/port.c @@ -251,11 +251,11 @@ static void prvTaskExitError( void ) void vPortSVCHandler( void ) { __asm volatile ( - " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */ - " ldr r1, [r3] \n"/* Use pxCurrentTCBConst to get the pxCurrentTCB address. */ - " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */ - " ldmia r0!, {r4-r11, r14} \n"/* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */ - " msr psp, r0 \n"/* Restore the task stack pointer. */ + " ldr r3, pxCurrentTCBConst2 \n" /* Restore the context. */ + " ldr r1, [r3] \n" /* Use pxCurrentTCBConst to get the pxCurrentTCB address. */ + " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */ + " ldmia r0!, {r4-r11, r14} \n" /* Pop the registers that are not automatically saved on exception entry and the critical nesting count. 
*/ + " msr psp, r0 \n" /* Restore the task stack pointer. */ " isb \n" " mov r0, #0 \n" " msr basepri, r0 \n" @@ -274,17 +274,17 @@ static void prvPortStartFirstTask( void ) * would otherwise result in the unnecessary leaving of space in the SVC stack * for lazy saving of FPU registers. */ __asm volatile ( - " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */ + " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */ " ldr r0, [r0] \n" " ldr r0, [r0] \n" - " msr msp, r0 \n"/* Set the msp back to the start of the stack. */ - " mov r0, #0 \n"/* Clear the bit that indicates the FPU is in use, see comment above. */ + " msr msp, r0 \n" /* Set the msp back to the start of the stack. */ + " mov r0, #0 \n" /* Clear the bit that indicates the FPU is in use, see comment above. */ " msr control, r0 \n" - " cpsie i \n"/* Globally enable interrupts. */ + " cpsie i \n" /* Globally enable interrupts. */ " cpsie f \n" " dsb \n" " isb \n" - " svc 0 \n"/* System call to start first task. */ + " svc 0 \n" /* System call to start first task. */ " nop \n" " .ltorg \n" ); @@ -305,6 +305,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -336,20 +337,46 @@ BaseType_t xPortStartScheduler( void ) /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. 
*/ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -358,7 +385,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -453,15 +480,15 @@ void xPortPendSVHandler( void ) " mrs r0, psp \n" " isb \n" " \n" - " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */ + " ldr r3, pxCurrentTCBConst \n" /* Get the location of the current TCB. */ " ldr r2, [r3] \n" " \n" - " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, push high vfp registers. */ + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, push high vfp registers. */ " it eq \n" " vstmdbeq r0!, {s16-s31} \n" " \n" - " stmdb r0!, {r4-r11, r14} \n"/* Save the core registers. */ - " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */ + " stmdb r0!, {r4-r11, r14} \n" /* Save the core registers. */ + " str r0, [r2] \n" /* Save the new top of stack into the first member of the TCB. */ " \n" " stmdb sp!, {r0, r3} \n" " mov r0, %0 \n" @@ -473,12 +500,12 @@ void xPortPendSVHandler( void ) " msr basepri, r0 \n" " ldmia sp!, {r0, r3} \n" " \n" - " ldr r1, [r3] \n"/* The first item in pxCurrentTCB is the task top of stack. */ + " ldr r1, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */ " ldr r0, [r1] \n" " \n" - " ldmia r0!, {r4-r11, r14} \n"/* Pop the core registers. */ + " ldmia r0!, {r4-r11, r14} \n" /* Pop the core registers. */ " \n" - " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, pop the high vfp registers too. */ + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, pop the high vfp registers too. */ " it eq \n" " vldmiaeq r0!, {s16-s31} \n" " \n" @@ -772,10 +799,10 @@ static void vPortEnableVFP( void ) { __asm volatile ( - " ldr.w r0, =0xE000ED88 \n"/* The FPU enable bits are in the CPACR. */ + " ldr.w r0, =0xE000ED88 \n" /* The FPU enable bits are in the CPACR. */ " ldr r1, [r0] \n" " \n" - " orr r1, r1, #( 0xf << 20 ) \n"/* Enable CP10 and CP11 coprocessors, then save back. */ + " orr r1, r1, #( 0xf << 20 ) \n" /* Enable CP10 and CP11 coprocessors, then save back. */ " str r1, [r0] \n" " bx r14 \n" " .ltorg \n" diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c old mode 100644 new mode 100755 index 57d0ea5a2f6..682b64ea8c1 --- a/portable/GCC/ARM_CM4_MPU/port.c +++ b/portable/GCC/ARM_CM4_MPU/port.c @@ -428,6 +428,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -459,20 +460,46 @@ BaseType_t xPortStartScheduler( void ) /* Calculate the maximum acceptable priority group value for the number * of bits read back. 
*/ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -481,7 +508,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -1046,4 +1073,4 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } #endif /* configASSERT_DEFINED */ -/*-----------------------------------------------------------*/ +/*-----------------------------------------------------------*/ \ No newline at end of file diff --git a/portable/GCC/ARM_CM7/r0p1/port.c b/portable/GCC/ARM_CM7/r0p1/port.c old mode 100644 new mode 100755 index c602bd297cd..25a06bf46e2 --- a/portable/GCC/ARM_CM7/r0p1/port.c +++ b/portable/GCC/ARM_CM7/r0p1/port.c @@ -245,11 +245,11 @@ static void prvTaskExitError( void ) void vPortSVCHandler( void ) { __asm volatile ( - " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */ - " ldr r1, [r3] \n"/* Use pxCurrentTCBConst to get the pxCurrentTCB address. */ - " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. */ - " ldmia r0!, {r4-r11, r14} \n"/* Pop the registers that are not automatically saved on exception entry and the critical nesting count. */ - " msr psp, r0 \n"/* Restore the task stack pointer. */ + " ldr r3, pxCurrentTCBConst2 \n" /* Restore the context. */ + " ldr r1, [r3] \n" /* Use pxCurrentTCBConst to get the pxCurrentTCB address. */ + " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */ + " ldmia r0!, {r4-r11, r14} \n" /* Pop the registers that are not automatically saved on exception entry and the critical nesting count. 
*/ + " msr psp, r0 \n" /* Restore the task stack pointer. */ " isb \n" " mov r0, #0 \n" " msr basepri, r0 \n" @@ -268,17 +268,17 @@ static void prvPortStartFirstTask( void ) * would otherwise result in the unnecessary leaving of space in the SVC stack * for lazy saving of FPU registers. */ __asm volatile ( - " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */ + " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */ " ldr r0, [r0] \n" " ldr r0, [r0] \n" - " msr msp, r0 \n"/* Set the msp back to the start of the stack. */ - " mov r0, #0 \n"/* Clear the bit that indicates the FPU is in use, see comment above. */ + " msr msp, r0 \n" /* Set the msp back to the start of the stack. */ + " mov r0, #0 \n" /* Clear the bit that indicates the FPU is in use, see comment above. */ " msr control, r0 \n" - " cpsie i \n"/* Globally enable interrupts. */ + " cpsie i \n" /* Globally enable interrupts. */ " cpsie f \n" " dsb \n" " isb \n" - " svc 0 \n"/* System call to start first task. */ + " svc 0 \n" /* System call to start first task. */ " nop \n" " .ltorg \n" ); @@ -293,6 +293,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -324,20 +325,46 @@ BaseType_t xPortStartScheduler( void ) /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. 
*/ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -346,7 +373,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif @@ -441,34 +468,34 @@ void xPortPendSVHandler( void ) " mrs r0, psp \n" " isb \n" " \n" - " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */ + " ldr r3, pxCurrentTCBConst \n" /* Get the location of the current TCB. */ " ldr r2, [r3] \n" " \n" - " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, push high vfp registers. */ + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, push high vfp registers. */ " it eq \n" " vstmdbeq r0!, {s16-s31} \n" " \n" - " stmdb r0!, {r4-r11, r14} \n"/* Save the core registers. */ - " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */ + " stmdb r0!, {r4-r11, r14} \n" /* Save the core registers. */ + " str r0, [r2] \n" /* Save the new top of stack into the first member of the TCB. */ " \n" " stmdb sp!, {r0, r3} \n" " mov r0, %0 \n" - " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + " cpsid i \n" /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ " msr basepri, r0 \n" " dsb \n" " isb \n" - " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + " cpsie i \n" /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ " bl vTaskSwitchContext \n" " mov r0, #0 \n" " msr basepri, r0 \n" " ldmia sp!, {r0, r3} \n" " \n" - " ldr r1, [r3] \n"/* The first item in pxCurrentTCB is the task top of stack. */ + " ldr r1, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */ " ldr r0, [r1] \n" " \n" - " ldmia r0!, {r4-r11, r14} \n"/* Pop the core registers. */ + " ldmia r0!, {r4-r11, r14} \n" /* Pop the core registers. */ " \n" - " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, pop the high vfp registers too. */ + " tst r14, #0x10 \n" /* Is the task using the FPU context? If so, pop the high vfp registers too. */ " it eq \n" " vldmiaeq r0!, {s16-s31} \n" " \n" @@ -762,10 +789,10 @@ static void vPortEnableVFP( void ) { __asm volatile ( - " ldr.w r0, =0xE000ED88 \n"/* The FPU enable bits are in the CPACR. */ + " ldr.w r0, =0xE000ED88 \n" /* The FPU enable bits are in the CPACR. */ " ldr r1, [r0] \n" " \n" - " orr r1, r1, #( 0xf << 20 ) \n"/* Enable CP10 and CP11 coprocessors, then save back. */ + " orr r1, r1, #( 0xf << 20 ) \n" /* Enable CP10 and CP11 coprocessors, then save back. 
*/ " str r1, [r0] \n" " bx r14 \n" " .ltorg \n" diff --git a/portable/IAR/ARM_CM3/port.c b/portable/IAR/ARM_CM3/port.c old mode 100644 new mode 100755 index f35ee98c798..a7a5ad86bd2 --- a/portable/IAR/ARM_CM3/port.c +++ b/portable/IAR/ARM_CM3/port.c @@ -211,6 +211,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -242,20 +243,46 @@ BaseType_t xPortStartScheduler( void ) /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -264,7 +291,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. 
*/ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif diff --git a/portable/IAR/ARM_CM4F/port.c b/portable/IAR/ARM_CM4F/port.c old mode 100644 new mode 100755 index 90be11d0eda..9b4ed52fae5 --- a/portable/IAR/ARM_CM4F/port.c +++ b/portable/IAR/ARM_CM4F/port.c @@ -249,6 +249,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -280,20 +281,46 @@ BaseType_t xPortStartScheduler( void ) /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -302,7 +329,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c old mode 100644 new mode 100755 index dd6bde281c2..6aec5b34262 --- a/portable/IAR/ARM_CM4F_MPU/port.c +++ b/portable/IAR/ARM_CM4F_MPU/port.c @@ -194,7 +194,7 @@ extern void vPortRestoreContextOfFirstTask( void ) PRIVILEGED_FUNCTION; /** * @brief Enter critical section. 
*/ -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) +#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) void vPortEnterCritical( void ) FREERTOS_SYSTEM_CALL; #else void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; @@ -203,7 +203,7 @@ extern void vPortRestoreContextOfFirstTask( void ) PRIVILEGED_FUNCTION; /** * @brief Exit from critical section. */ -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) +#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) void vPortExitCritical( void ) FREERTOS_SYSTEM_CALL; #else void vPortExitCritical( void ) PRIVILEGED_FUNCTION; @@ -316,9 +316,9 @@ void vPortSVCHandler_C( uint32_t * pulParam ) { __asm volatile ( - " mrs r1, control \n"/* Obtain current control value. */ - " bic r1, r1, #1 \n"/* Set privilege bit. */ - " msr control, r1 \n"/* Write back new control value. */ + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, r1, #1 \n" /* Set privilege bit. */ + " msr control, r1 \n" /* Write back new control value. */ ::: "r1", "memory" ); } @@ -328,9 +328,9 @@ void vPortSVCHandler_C( uint32_t * pulParam ) case portSVC_RAISE_PRIVILEGE: __asm volatile ( - " mrs r1, control \n"/* Obtain current control value. */ - " bic r1, r1, #1 \n"/* Set privilege bit. */ - " msr control, r1 \n"/* Write back new control value. */ + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, r1, #1 \n" /* Set privilege bit. */ + " msr control, r1 \n" /* Write back new control value. */ ::: "r1", "memory" ); break; @@ -352,6 +352,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) configASSERT( ( portCPUID == portCORTEX_M7_r0p1_ID ) || ( portCPUID == portCORTEX_M7_r0p0_ID ) ); #else + /* When using this port on a Cortex-M7 r0p0 or r0p1 core, define * configENABLE_ERRATA_837070_WORKAROUND to 1 in your * FreeRTOSConfig.h. */ @@ -360,74 +361,101 @@ BaseType_t xPortStartScheduler( void ) #endif #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. 
*/ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Check that the maximum system call priority is nonzero after - * accounting for the number of priority bits supported by the - * hardware. A priority of 0 is invalid because setting the BASEPRI - * register to 0 unmasks all interrupts, and interrupts with priority 0 - * cannot be masked using BASEPRI. - * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( ucMaxSysCallPriority ); + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif - - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. 
This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif + + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } + #endif + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the lowest priority interrupts. */ @@ -468,32 +496,49 @@ void vPortEndScheduler( void ) void vPortEnterCritical( void ) { -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - if( portIS_PRIVILEGED() == pdFALSE ) - { - portRAISE_PRIVILEGE(); - portMEMORY_BARRIER(); + #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); + + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + + /* This is not the interrupt safe version of the enter critical function so + * assert() if it is being called from an interrupt context. Only API + * functions that end in "FromISR" can be used in an interrupt. Only assert if + * the critical nesting count is 1 to protect against recursive calls if the + * assert function also uses a critical section. */ + if( uxCriticalNesting == 1 ) + { + configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); + } - portDISABLE_INTERRUPTS(); - uxCriticalNesting++; - /* This is not the interrupt safe version of the enter critical function so - * assert() if it is being called from an interrupt context. Only API - * functions that end in "FromISR" can be used in an interrupt. Only assert if - * the critical nesting count is 1 to protect against recursive calls if the - * assert function also uses a critical section. */ - if( uxCriticalNesting == 1 ) + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else { - configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + + /* This is not the interrupt safe version of the enter critical function so + * assert() if it is being called from an interrupt context. Only API + * functions that end in "FromISR" can be used in an interrupt. Only assert if + * the critical nesting count is 1 to protect against recursive calls if the + * assert function also uses a critical section. 
*/ + if( uxCriticalNesting == 1 ) + { + configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); + } } - portMEMORY_BARRIER(); - - portRESET_PRIVILEGE(); - portMEMORY_BARRIER(); - } - else - { + #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ portDISABLE_INTERRUPTS(); uxCriticalNesting++; + /* This is not the interrupt safe version of the enter critical function so * assert() if it is being called from an interrupt context. Only API * functions that end in "FromISR" can be used in an interrupt. Only assert if @@ -503,45 +548,42 @@ void vPortEnterCritical( void ) { configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); } - } -#else - portDISABLE_INTERRUPTS(); - uxCriticalNesting++; - /* This is not the interrupt safe version of the enter critical function so - * assert() if it is being called from an interrupt context. Only API - * functions that end in "FromISR" can be used in an interrupt. Only assert if - * the critical nesting count is 1 to protect against recursive calls if the - * assert function also uses a critical section. */ - if( uxCriticalNesting == 1 ) - { - configASSERT( ( portNVIC_INT_CTRL_REG & portVECTACTIVE_MASK ) == 0 ); - } -#endif + #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ } /*-----------------------------------------------------------*/ void vPortExitCritical( void ) { -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - if( portIS_PRIVILEGED() == pdFALSE ) - { - portRAISE_PRIVILEGE(); - portMEMORY_BARRIER(); + #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - configASSERT( uxCriticalNesting ); - uxCriticalNesting--; + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; - if( uxCriticalNesting == 0 ) - { - portENABLE_INTERRUPTS(); + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); } - portMEMORY_BARRIER(); + else + { + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; - portRESET_PRIVILEGE(); - portMEMORY_BARRIER(); - } - else - { + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + } + #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ configASSERT( uxCriticalNesting ); uxCriticalNesting--; @@ -549,16 +591,7 @@ void vPortExitCritical( void ) { portENABLE_INTERRUPTS(); } - } -#else - configASSERT( uxCriticalNesting ); - uxCriticalNesting--; - - if( uxCriticalNesting == 0 ) - { - portENABLE_INTERRUPTS(); - } -#endif + #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ } /*-----------------------------------------------------------*/ @@ -710,7 +743,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ 0 ].ulRegionBaseAddress = ( ( uint32_t ) __SRAM_segment_start__ ) | /* Base address. */ ( portMPU_REGION_VALID ) | - ( portSTACK_REGION ); /* Region number. */ + ( portSTACK_REGION ); /* Region number. 
*/ xMPUSettings->xRegion[ 0 ].ulRegionAttribute = ( portMPU_REGION_READ_WRITE ) | diff --git a/portable/IAR/ARM_CM7/r0p1/port.c b/portable/IAR/ARM_CM7/r0p1/port.c old mode 100644 new mode 100755 index e9e3805f054..1c53fa94e28 --- a/portable/IAR/ARM_CM7/r0p1/port.c +++ b/portable/IAR/ARM_CM7/r0p1/port.c @@ -237,6 +237,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -268,20 +269,46 @@ BaseType_t xPortStartScheduler( void ) /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -290,7 +317,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. 
*/ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif diff --git a/portable/MikroC/ARM_CM4F/port.c b/portable/MikroC/ARM_CM4F/port.c old mode 100644 new mode 100755 index 886913fbc40..8e314b67092 --- a/portable/MikroC/ARM_CM4F/port.c +++ b/portable/MikroC/ARM_CM4F/port.c @@ -299,6 +299,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -330,20 +331,46 @@ BaseType_t xPortStartScheduler( void ) /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -352,7 +379,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. 
*/ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif diff --git a/portable/RVDS/ARM_CM3/port.c b/portable/RVDS/ARM_CM3/port.c old mode 100644 new mode 100755 index ac4bef29572..e1bfc6afcc7 --- a/portable/RVDS/ARM_CM3/port.c +++ b/portable/RVDS/ARM_CM3/port.c @@ -264,6 +264,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -295,20 +296,46 @@ BaseType_t xPortStartScheduler( void ) /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -317,7 +344,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. 
*/ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif diff --git a/portable/RVDS/ARM_CM4F/port.c b/portable/RVDS/ARM_CM4F/port.c old mode 100644 new mode 100755 index cdc063dc9f8..5233533c0c7 --- a/portable/RVDS/ARM_CM4F/port.c +++ b/portable/RVDS/ARM_CM4F/port.c @@ -330,6 +330,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -361,20 +362,46 @@ BaseType_t xPortStartScheduler( void ) /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -383,7 +410,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c old mode 100644 new mode 100755 index b9c7f90b446..d9978a913f7 --- a/portable/RVDS/ARM_CM4_MPU/port.c +++ b/portable/RVDS/ARM_CM4_MPU/port.c @@ -201,7 +201,7 @@ void vResetPrivilege( void ); /** * @brief Enter critical section. 
*/ -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) +#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) void vPortEnterCritical( void ) FREERTOS_SYSTEM_CALL; #else void vPortEnterCritical( void ) PRIVILEGED_FUNCTION; @@ -210,7 +210,7 @@ void vResetPrivilege( void ); /** * @brief Exit from critical section. */ -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) +#if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) void vPortExitCritical( void ) FREERTOS_SYSTEM_CALL; #else void vPortExitCritical( void ) PRIVILEGED_FUNCTION; @@ -412,6 +412,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) configASSERT( ( portCPUID == portCORTEX_M7_r0p1_ID ) || ( portCPUID == portCORTEX_M7_r0p0_ID ) ); #else + /* When using this port on a Cortex-M7 r0p0 or r0p1 core, define * configENABLE_ERRATA_837070_WORKAROUND to 1 in your * FreeRTOSConfig.h. */ @@ -420,74 +421,101 @@ BaseType_t xPortStartScheduler( void ) #endif #if ( configASSERT_DEFINED == 1 ) - { - volatile uint32_t ulOriginalPriority; - volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); - volatile uint8_t ucMaxPriorityValue; - - /* Determine the maximum priority from which ISR safe FreeRTOS API - * functions can be called. ISR safe functions are those that end in - * "FromISR". FreeRTOS maintains separate thread and ISR API functions to - * ensure interrupt entry is as fast and simple as possible. - * - * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = *pucFirstUserPriorityRegister; - /* Determine the number of priority bits available. First write to all - * possible bits. */ - *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; + /* Determine the number of priority bits available. First write to all + * possible bits. */ + *pucFirstUserPriorityRegister = portMAX_8_BIT_VALUE; - /* Read the value back to see how many bits stuck. */ - ucMaxPriorityValue = *pucFirstUserPriorityRegister; + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = *pucFirstUserPriorityRegister; - /* Use the same mask on the maximum system call priority. */ - ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; - /* Check that the maximum system call priority is nonzero after - * accounting for the number of priority bits supported by the - * hardware. A priority of 0 is invalid because setting the BASEPRI - * register to 0 unmasks all interrupts, and interrupts with priority 0 - * cannot be masked using BASEPRI. 
- * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ - configASSERT( ucMaxSysCallPriority ); + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); - /* Calculate the maximum acceptable priority group value for the number - * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ - while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) - { - ulMaxPRIGROUPValue--; - ucMaxPriorityValue <<= ( uint8_t ) 0x01; - } + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } - #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); - } - #endif - - /* Shift the priority group value back to its position within the AIRCR - * register. */ - ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; - ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; - - /* Restore the clobbered interrupt priority register to its original - * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + #ifdef __NVIC_PRIO_BITS + { + /* Check the CMSIS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. 
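For the eight-bit case handled above, the new configASSERT on the low bit of configMAX_SYSCALL_INTERRUPT_PRIORITY can be checked with a small sketch. The mapping below assumes 8 implemented bits with PRIGROUP left at 0, so bit 0 is the sub-priority bit and the preemption level is the value shifted right by one.

#include <stdio.h>

int main( void )
{
    unsigned int uxPriority;

    for( uxPriority = 3U; uxPriority <= 6U; uxPriority++ )
    {
        /* With bit 0 as sub-priority, 4 and 5 land on the same preemption
         * level (2), which is why the assert requires the bit to be clear. */
        printf( "priority %u -> preemption level %u, sub-priority %u\n",
                uxPriority, uxPriority >> 1, uxPriority & 1U );
    }

    return 0;
}

Choosing an even value for configMAX_SYSCALL_INTERRUPT_PRIORITY (4 rather than 5 in the comment's example) keeps the masked range unambiguous.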
*/ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } + #endif + + #ifdef configPRIO_BITS + { + /* Check the FreeRTOS configuration that defines the number of + * priority bits matches the number of priority bits actually queried + * from the hardware. */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + *pucFirstUserPriorityRegister = ulOriginalPriority; + } #endif /* configASSERT_DEFINED */ /* Make PendSV and SysTick the same priority as the kernel, and the SVC @@ -559,53 +587,63 @@ void vPortEndScheduler( void ) void vPortEnterCritical( void ) { -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - if( portIS_PRIVILEGED() == pdFALSE ) - { - portRAISE_PRIVILEGE(); - portMEMORY_BARRIER(); + #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - portDISABLE_INTERRUPTS(); - uxCriticalNesting++; - portMEMORY_BARRIER(); + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + portMEMORY_BARRIER(); - portRESET_PRIVILEGE(); - portMEMORY_BARRIER(); - } - else - { + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); + } + else + { + portDISABLE_INTERRUPTS(); + uxCriticalNesting++; + } + #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ portDISABLE_INTERRUPTS(); uxCriticalNesting++; - } -#else - portDISABLE_INTERRUPTS(); - uxCriticalNesting++; -#endif + #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ } /*-----------------------------------------------------------*/ void vPortExitCritical( void ) { -#if( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) - if( portIS_PRIVILEGED() == pdFALSE ) - { - portRAISE_PRIVILEGE(); - portMEMORY_BARRIER(); + #if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) + if( portIS_PRIVILEGED() == pdFALSE ) + { + portRAISE_PRIVILEGE(); + portMEMORY_BARRIER(); - configASSERT( uxCriticalNesting ); - uxCriticalNesting--; + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; - if( uxCriticalNesting == 0 ) - { - portENABLE_INTERRUPTS(); + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + + portMEMORY_BARRIER(); + + portRESET_PRIVILEGE(); + portMEMORY_BARRIER(); } - portMEMORY_BARRIER(); + else + { + configASSERT( uxCriticalNesting ); + uxCriticalNesting--; - portRESET_PRIVILEGE(); - portMEMORY_BARRIER(); - } - else - { + if( uxCriticalNesting == 0 ) + { + portENABLE_INTERRUPTS(); + } + } + #else /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ configASSERT( uxCriticalNesting ); uxCriticalNesting--; @@ -613,16 +651,7 @@ void vPortExitCritical( void ) { portENABLE_INTERRUPTS(); } - } -#else - configASSERT( uxCriticalNesting ); - uxCriticalNesting--; - - if( uxCriticalNesting == 0 ) - { - portENABLE_INTERRUPTS(); - } -#endif + #endif /* if ( configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS == 1 ) */ } /*-----------------------------------------------------------*/ @@ -910,7 +939,7 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, xMPUSettings->xRegion[ 0 ].ulRegionBaseAddress = ( ( uint32_t ) __SRAM_segment_start__ ) | /* Base address. */ ( portMPU_REGION_VALID ) | - ( portSTACK_REGION ); /* Region number. */ + ( portSTACK_REGION ); /* Region number. 
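The re-indented vPortEnterCritical()/vPortExitCritical() pair above is what runs behind taskENTER_CRITICAL()/taskEXIT_CRITICAL() on this port. A usage sketch, assuming an MPU build with configALLOW_UNPRIVILEGED_CRITICAL_SECTIONS set to 1 so the calls are legal from an unprivileged task; the task name and its body are made up for the example.

#include "FreeRTOS.h"
#include "task.h"

void vUnprivilegedTask( void * pvParameters )
{
    ( void ) pvParameters;

    for( ; ; )
    {
        /* vPortEnterCritical() raises privilege if needed, disables
         * interrupts and increments the nesting count. */
        taskENTER_CRITICAL();

        /* Short, non-blocking work on data shared with other tasks. */

        /* vPortExitCritical() unwinds the nesting count, re-enables
         * interrupts at zero and drops the temporary privilege. */
        taskEXIT_CRITICAL();
    }
}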
*/ xMPUSettings->xRegion[ 0 ].ulRegionAttribute = ( portMPU_REGION_READ_WRITE ) | diff --git a/portable/RVDS/ARM_CM7/r0p1/port.c b/portable/RVDS/ARM_CM7/r0p1/port.c old mode 100644 new mode 100755 index 7dadb9adb34..ee456d87a9b --- a/portable/RVDS/ARM_CM7/r0p1/port.c +++ b/portable/RVDS/ARM_CM7/r0p1/port.c @@ -314,6 +314,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -345,20 +346,46 @@ BaseType_t xPortStartScheduler( void ) /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ - ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS; while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) { - ulMaxPRIGROUPValue--; + ulImplementedPrioBits++; ucMaxPriorityValue <<= ( uint8_t ) 0x01; } + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + #ifdef __NVIC_PRIO_BITS { /* Check the CMSIS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == __NVIC_PRIO_BITS ); + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); } #endif @@ -367,7 +394,7 @@ BaseType_t xPortStartScheduler( void ) /* Check the FreeRTOS configuration that defines the number of * priority bits matches the number of priority bits actually queried * from the hardware. */ - configASSERT( ( portMAX_PRIGROUP_BITS - ulMaxPRIGROUPValue ) == configPRIO_BITS ); + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); } #endif diff --git a/portable/Tasking/ARM_CM4F/port.c b/portable/Tasking/ARM_CM4F/port.c old mode 100644 new mode 100755 index 66b0d629466..4ba5da739d0 --- a/portable/Tasking/ARM_CM4F/port.c +++ b/portable/Tasking/ARM_CM4F/port.c @@ -266,5 +266,4 @@ void prvSetupTimerInterrupt( void ) /* Configure SysTick to interrupt at the requested rate. 
*/ *( portNVIC_SYSTICK_LOAD ) = ( configCPU_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL; *( portNVIC_SYSTICK_CTRL ) = portNVIC_SYSTICK_CLK | portNVIC_SYSTICK_INT | portNVIC_SYSTICK_ENABLE; -} -/*-----------------------------------------------------------*/ +} \ No newline at end of file From 97e58da31362f180642a381f9ea55aee04546c83 Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Tue, 28 Mar 2023 14:28:47 +0530 Subject: [PATCH 28/62] Remove C90 requirement from CMakeLists (#649) This is needed as it is breaking projects - https://forums.freertos.org/t/freertos-gcc-cmake/16984 We will re-evaluate and accordingly add this later. Signed-off-by: Gaurav Aggarwal --- CMakeLists.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6d83db88b9c..d45de64b1b5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -292,5 +292,3 @@ target_link_libraries(freertos_kernel ) ######################################################################## -# Requirements -set_property(TARGET freertos_kernel PROPERTY C_STANDARD 90) From 68f105375f65ffd347e3e322d46200cb389d8886 Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Tue, 28 Mar 2023 17:01:37 +0530 Subject: [PATCH 29/62] Only add alignment padding when needed (#650) Heap 4 and Heap 5 add some padding to ensure that the allocated blocks are always aligned to portBYTE_ALIGNMENT bytes. The code until now was adding padding always even if the resulting block was already aligned. This commits updates the code to only add padding if the resulting block is not aligned. Signed-off-by: Gaurav Aggarwal --- portable/MemMang/heap_4.c | 30 ++++++++++++++++++++++++------ portable/MemMang/heap_5.c | 30 ++++++++++++++++++++++++------ 2 files changed, 48 insertions(+), 12 deletions(-) diff --git a/portable/MemMang/heap_4.c b/portable/MemMang/heap_4.c index 8ca7c81e4f3..c7a8209ed2b 100644 --- a/portable/MemMang/heap_4.c +++ b/portable/MemMang/heap_4.c @@ -159,13 +159,31 @@ void * pvPortMalloc( size_t xWantedSize ) if( xWantedSize > 0 ) { /* The wanted size must be increased so it can contain a BlockLink_t - * structure in addition to the requested amount of bytes. Some - * additional increment may also be needed for alignment. */ - xAdditionalRequiredSize = xHeapStructSize + portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ); - - if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 ) + * structure in addition to the requested amount of bytes. */ + if( heapADD_WILL_OVERFLOW( xWantedSize, xHeapStructSize ) == 0 ) { - xWantedSize += xAdditionalRequiredSize; + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number + * of bytes. */ + if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. 
*/ + xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ); + + if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 ) + { + xWantedSize += xAdditionalRequiredSize; + } + else + { + xWantedSize = 0; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { diff --git a/portable/MemMang/heap_5.c b/portable/MemMang/heap_5.c index 3a1df9b64c9..db9e1eb37ad 100644 --- a/portable/MemMang/heap_5.c +++ b/portable/MemMang/heap_5.c @@ -170,13 +170,31 @@ void * pvPortMalloc( size_t xWantedSize ) if( xWantedSize > 0 ) { /* The wanted size must be increased so it can contain a BlockLink_t - * structure in addition to the requested amount of bytes. Some - * additional increment may also be needed for alignment. */ - xAdditionalRequiredSize = xHeapStructSize + portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ); - - if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 ) + * structure in addition to the requested amount of bytes. */ + if( heapADD_WILL_OVERFLOW( xWantedSize, xHeapStructSize ) == 0 ) { - xWantedSize += xAdditionalRequiredSize; + xWantedSize += xHeapStructSize; + + /* Ensure that blocks are always aligned to the required number + * of bytes. */ + if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) + { + /* Byte alignment required. */ + xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ); + + if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 ) + { + xWantedSize += xAdditionalRequiredSize; + } + else + { + xWantedSize = 0; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } } else { From 1b8a4244bdd911f156ce5bc6f9f13dcde68228cb Mon Sep 17 00:00:00 2001 From: Nicolas Date: Wed, 29 Mar 2023 15:23:45 +0200 Subject: [PATCH 30/62] add a missing comma (#651) --- include/task.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/task.h b/include/task.h index 054f43644a1..702f74dce9b 100644 --- a/include/task.h +++ b/include/task.h @@ -1664,7 +1664,7 @@ configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVIL /** * task.h * @code{c} - * void vApplicationStackOverflowHook( TaskHandle_t xTask char *pcTaskName); + * void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName); * @endcode * * The application stack overflow hook is called when a stack overflow is detected for a task. 
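Stepping back to the heap_4.c / heap_5.c change above: padding is now added only when the struct-inclusive size is misaligned. A small host-side sketch of the rounding, with the alignment and header size hard-coded as assumptions (8 bytes each) purely for illustration and the overflow checks omitted:

#include <stddef.h>
#include <stdio.h>

#define portBYTE_ALIGNMENT         8
#define portBYTE_ALIGNMENT_MASK    ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) )

static const size_t xHeapStructSize = 8;   /* assumed header size for the example */

static size_t prvPaddedSize( size_t xWantedSize )
{
    size_t xTotalSize = xWantedSize + xHeapStructSize;

    /* Pad only when the block is not already aligned - previously the
     * padding term was added unconditionally. */
    if( ( xTotalSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
    {
        xTotalSize += portBYTE_ALIGNMENT - ( xTotalSize & portBYTE_ALIGNMENT_MASK );
    }

    return xTotalSize;
}

int main( void )
{
    /* 24 + 8 is already a multiple of 8, so no padding is added;
     * 30 + 8 = 38 rounds up to 40. Prints "32 40". */
    printf( "%zu %zu\n", prvPaddedSize( 24 ), prvPaddedSize( 30 ) );
    return 0;
}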
From aa987a3443b60894bc64714135bca44831406886 Mon Sep 17 00:00:00 2001 From: Vo Trung Chi Date: Tue, 4 Apr 2023 22:10:54 +0700 Subject: [PATCH 31/62] fix conversion warning (#658) FreeRTOS-Kernel/portable/GCC/ARM_CM4F/port.c:399:41: error: conversion from 'uint32_t' {aka 'long unsigned int'} to 'uint8_t' {aka 'unsigned char'} may change value [-Werror=conversion] Signed-off-by: Vo Trung Chi --- portable/CCS/ARM_CM3/port.c | 6 +++--- portable/CCS/ARM_CM4F/port.c | 6 +++--- portable/GCC/ARM_CA53_64_BIT/port.c | 6 +++--- portable/GCC/ARM_CA9/port.c | 6 +++--- portable/GCC/ARM_CM3/port.c | 6 +++--- portable/GCC/ARM_CM3_MPU/port.c | 6 +++--- portable/GCC/ARM_CM4F/port.c | 6 +++--- portable/GCC/ARM_CM4_MPU/port.c | 6 +++--- portable/GCC/ARM_CM7/r0p1/port.c | 6 +++--- portable/GCC/ARM_CR5/port.c | 6 +++--- portable/IAR/ARM_CM3/port.c | 6 +++--- portable/IAR/ARM_CM4F/port.c | 6 +++--- portable/IAR/ARM_CM4F_MPU/port.c | 6 +++--- portable/IAR/ARM_CM7/r0p1/port.c | 6 +++--- portable/MikroC/ARM_CM4F/port.c | 6 +++--- portable/RVDS/ARM_CM3/port.c | 6 +++--- portable/RVDS/ARM_CM4F/port.c | 6 +++--- portable/RVDS/ARM_CM4_MPU/port.c | 6 +++--- portable/RVDS/ARM_CM7/r0p1/port.c | 6 +++--- 19 files changed, 57 insertions(+), 57 deletions(-) diff --git a/portable/CCS/ARM_CM3/port.c b/portable/CCS/ARM_CM3/port.c index 63dc600a756..ef5fa5b9340 100755 --- a/portable/CCS/ARM_CM3/port.c +++ b/portable/CCS/ARM_CM3/port.c @@ -218,7 +218,7 @@ BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -229,7 +229,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -310,7 +310,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/CCS/ARM_CM4F/port.c b/portable/CCS/ARM_CM4F/port.c index a741b091561..c43cf0ef314 100755 --- a/portable/CCS/ARM_CM4F/port.c +++ b/portable/CCS/ARM_CM4F/port.c @@ -237,7 +237,7 @@ BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -248,7 +248,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. 
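The same one-line type change (ulOriginalPriority becomes the uint8_t ucOriginalPriority) is repeated for every port below. A minimal reproduction of the warning the patch silences, using a plain volatile variable as a stand-in for the priority register rather than a real NVIC access; compile with gcc -c -Wconversion:

#include <stdint.h>

volatile uint8_t ucPriorityRegister;   /* stand-in for *pucFirstUserPriorityRegister */

void vDemo( void )
{
    volatile uint32_t ulOriginalPriority = ucPriorityRegister;   /* widening - no warning */
    volatile uint8_t ucOriginalPriority = ucPriorityRegister;    /* same width - no warning */

    ucPriorityRegister = ulOriginalPriority;   /* uint32_t -> uint8_t: -Wconversion warns here */
    ucPriorityRegister = ucOriginalPriority;   /* uint8_t -> uint8_t: clean, as in the patch */
}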
*/ @@ -329,7 +329,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/GCC/ARM_CA53_64_BIT/port.c b/portable/GCC/ARM_CA53_64_BIT/port.c index 3e686e33811..545bac15d86 100644 --- a/portable/GCC/ARM_CA53_64_BIT/port.c +++ b/portable/GCC/ARM_CA53_64_BIT/port.c @@ -270,14 +270,14 @@ uint32_t ulAPSR; #if( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + portINTERRUPT_PRIORITY_REGISTER_OFFSET ); volatile uint8_t ucMaxPriorityValue; /* Determine how many priority bits are implemented in the GIC. Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all possible bits. */ @@ -300,7 +300,7 @@ uint32_t ulAPSR; /* Restore the clobbered interrupt priority register to its original value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/GCC/ARM_CA9/port.c b/portable/GCC/ARM_CA9/port.c index 75e69f876a2..8c74214117c 100644 --- a/portable/GCC/ARM_CA9/port.c +++ b/portable/GCC/ARM_CA9/port.c @@ -329,14 +329,14 @@ uint32_t ulAPSR; #if( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + portINTERRUPT_PRIORITY_REGISTER_OFFSET ); volatile uint8_t ucMaxPriorityValue; /* Determine how many priority bits are implemented in the GIC. Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all possible bits. */ @@ -357,7 +357,7 @@ uint32_t ulAPSR; /* Restore the clobbered interrupt priority register to its original value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/GCC/ARM_CM3/port.c b/portable/GCC/ARM_CM3/port.c index 57337e60502..4aa1f2425d7 100755 --- a/portable/GCC/ARM_CM3/port.c +++ b/portable/GCC/ARM_CM3/port.c @@ -261,7 +261,7 @@ BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -272,7 +272,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. 
*/ @@ -353,7 +353,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c index 4b21236e785..e33df014c0e 100755 --- a/portable/GCC/ARM_CM3_MPU/port.c +++ b/portable/GCC/ARM_CM3_MPU/port.c @@ -384,7 +384,7 @@ BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -395,7 +395,7 @@ BaseType_t xPortStartScheduler( void ) * to ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -476,7 +476,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/GCC/ARM_CM4F/port.c b/portable/GCC/ARM_CM4F/port.c index 84e79130487..fd9e6dbb8f2 100755 --- a/portable/GCC/ARM_CM4F/port.c +++ b/portable/GCC/ARM_CM4F/port.c @@ -304,7 +304,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -315,7 +315,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -396,7 +396,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. 
*/ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c index 682b64ea8c1..1733fd82072 100755 --- a/portable/GCC/ARM_CM4_MPU/port.c +++ b/portable/GCC/ARM_CM4_MPU/port.c @@ -427,7 +427,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -438,7 +438,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -519,7 +519,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/GCC/ARM_CM7/r0p1/port.c b/portable/GCC/ARM_CM7/r0p1/port.c index 25a06bf46e2..316dba13b8e 100755 --- a/portable/GCC/ARM_CM7/r0p1/port.c +++ b/portable/GCC/ARM_CM7/r0p1/port.c @@ -292,7 +292,7 @@ BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -303,7 +303,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -384,7 +384,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/GCC/ARM_CR5/port.c b/portable/GCC/ARM_CR5/port.c index 03007821967..2cbc24dbf9d 100644 --- a/portable/GCC/ARM_CR5/port.c +++ b/portable/GCC/ARM_CR5/port.c @@ -411,7 +411,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( configINTERRUPT_CONTROLLER_BASE_ADDRESS + portINTERRUPT_PRIORITY_REGISTER_OFFSET ); volatile uint8_t ucMaxPriorityValue; @@ -419,7 +419,7 @@ BaseType_t xPortStartScheduler( void ) * Determine how many priority bits are implemented in the GIC. * Save the interrupt priority value that is about to be clobbered. 
*/ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* * Determine the number of priority bits available. First write to @@ -457,7 +457,7 @@ BaseType_t xPortStartScheduler( void ) * Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/IAR/ARM_CM3/port.c b/portable/IAR/ARM_CM3/port.c index a7a5ad86bd2..f1c78e46240 100755 --- a/portable/IAR/ARM_CM3/port.c +++ b/portable/IAR/ARM_CM3/port.c @@ -210,7 +210,7 @@ BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -221,7 +221,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -302,7 +302,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/IAR/ARM_CM4F/port.c b/portable/IAR/ARM_CM4F/port.c index 9b4ed52fae5..05d5be0aa65 100755 --- a/portable/IAR/ARM_CM4F/port.c +++ b/portable/IAR/ARM_CM4F/port.c @@ -248,7 +248,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -259,7 +259,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -340,7 +340,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. 
*/ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c index 6aec5b34262..69b7bc5d9bc 100755 --- a/portable/IAR/ARM_CM4F_MPU/port.c +++ b/portable/IAR/ARM_CM4F_MPU/port.c @@ -362,7 +362,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -373,7 +373,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -454,7 +454,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/IAR/ARM_CM7/r0p1/port.c b/portable/IAR/ARM_CM7/r0p1/port.c index 1c53fa94e28..9217653a7d4 100755 --- a/portable/IAR/ARM_CM7/r0p1/port.c +++ b/portable/IAR/ARM_CM7/r0p1/port.c @@ -236,7 +236,7 @@ BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -247,7 +247,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -328,7 +328,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/MikroC/ARM_CM4F/port.c b/portable/MikroC/ARM_CM4F/port.c index 8e314b67092..1936de19447 100755 --- a/portable/MikroC/ARM_CM4F/port.c +++ b/portable/MikroC/ARM_CM4F/port.c @@ -298,7 +298,7 @@ BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( volatile uint8_t * const ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -309,7 +309,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. 
*/ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -390,7 +390,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/RVDS/ARM_CM3/port.c b/portable/RVDS/ARM_CM3/port.c index e1bfc6afcc7..2ffdd9cc8bd 100755 --- a/portable/RVDS/ARM_CM3/port.c +++ b/portable/RVDS/ARM_CM3/port.c @@ -263,7 +263,7 @@ BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -274,7 +274,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -355,7 +355,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/RVDS/ARM_CM4F/port.c b/portable/RVDS/ARM_CM4F/port.c index 5233533c0c7..bf2fb86f76a 100755 --- a/portable/RVDS/ARM_CM4F/port.c +++ b/portable/RVDS/ARM_CM4F/port.c @@ -329,7 +329,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -340,7 +340,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -421,7 +421,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. 
*/ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c index d9978a913f7..e0bd8c86d0f 100755 --- a/portable/RVDS/ARM_CM4_MPU/port.c +++ b/portable/RVDS/ARM_CM4_MPU/port.c @@ -422,7 +422,7 @@ BaseType_t xPortStartScheduler( void ) #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -433,7 +433,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -514,7 +514,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ diff --git a/portable/RVDS/ARM_CM7/r0p1/port.c b/portable/RVDS/ARM_CM7/r0p1/port.c index ee456d87a9b..2e81e324df9 100755 --- a/portable/RVDS/ARM_CM7/r0p1/port.c +++ b/portable/RVDS/ARM_CM7/r0p1/port.c @@ -313,7 +313,7 @@ BaseType_t xPortStartScheduler( void ) { #if ( configASSERT_DEFINED == 1 ) { - volatile uint32_t ulOriginalPriority; + volatile uint8_t ucOriginalPriority; volatile uint32_t ulImplementedPrioBits = 0; volatile uint8_t * const pucFirstUserPriorityRegister = ( uint8_t * ) ( portNVIC_IP_REGISTERS_OFFSET_16 + portFIRST_USER_INTERRUPT_NUMBER ); volatile uint8_t ucMaxPriorityValue; @@ -324,7 +324,7 @@ BaseType_t xPortStartScheduler( void ) * ensure interrupt entry is as fast and simple as possible. * * Save the interrupt priority value that is about to be clobbered. */ - ulOriginalPriority = *pucFirstUserPriorityRegister; + ucOriginalPriority = *pucFirstUserPriorityRegister; /* Determine the number of priority bits available. First write to all * possible bits. */ @@ -405,7 +405,7 @@ BaseType_t xPortStartScheduler( void ) /* Restore the clobbered interrupt priority register to its original * value. */ - *pucFirstUserPriorityRegister = ulOriginalPriority; + *pucFirstUserPriorityRegister = ucOriginalPriority; } #endif /* configASSERT_DEFINED */ From 686b6e62eb48f0bab57cab47fbbd90d0d23504e8 Mon Sep 17 00:00:00 2001 From: Paul Bartell Date: Wed, 19 Apr 2023 22:24:54 -0700 Subject: [PATCH 32/62] ARMv7M: Adjust implemented priority bit assertions (#665) Adjust assertions related to the CMSIS __NVIC_PRIO_BITS and FreeRTOS configPRIO_BITS configuration macros such that these macros specify the minimum number of implemented priority bits supported by a config build rather than the exact number of implemented priority bits. 
Related to Qemu issue #1122 --- portable/CCS/ARM_CM3/port.c | 20 ++++++++++++-------- portable/CCS/ARM_CM4F/port.c | 20 ++++++++++++-------- portable/GCC/ARM_CM3/port.c | 20 ++++++++++++-------- portable/GCC/ARM_CM3_MPU/port.c | 28 ++++++++++++++++------------ portable/GCC/ARM_CM4F/port.c | 20 ++++++++++++-------- portable/GCC/ARM_CM4_MPU/port.c | 28 ++++++++++++++++------------ portable/GCC/ARM_CM7/r0p1/port.c | 20 ++++++++++++-------- portable/IAR/ARM_CM3/port.c | 20 ++++++++++++-------- portable/IAR/ARM_CM4F/port.c | 20 ++++++++++++-------- portable/IAR/ARM_CM4F_MPU/port.c | 20 ++++++++++++-------- portable/IAR/ARM_CM7/r0p1/port.c | 20 ++++++++++++-------- portable/MikroC/ARM_CM4F/port.c | 20 ++++++++++++-------- portable/RVDS/ARM_CM3/port.c | 20 ++++++++++++-------- portable/RVDS/ARM_CM4F/port.c | 20 ++++++++++++-------- portable/RVDS/ARM_CM4_MPU/port.c | 20 ++++++++++++-------- portable/RVDS/ARM_CM7/r0p1/port.c | 20 ++++++++++++-------- 16 files changed, 200 insertions(+), 136 deletions(-) diff --git a/portable/CCS/ARM_CM3/port.c b/portable/CCS/ARM_CM3/port.c index ef5fa5b9340..f3c4e5add03 100755 --- a/portable/CCS/ARM_CM3/port.c +++ b/portable/CCS/ARM_CM3/port.c @@ -287,19 +287,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/CCS/ARM_CM4F/port.c b/portable/CCS/ARM_CM4F/port.c index c43cf0ef314..c675afe67b4 100755 --- a/portable/CCS/ARM_CM4F/port.c +++ b/portable/CCS/ARM_CM4F/port.c @@ -306,19 +306,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. 
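In other words, the check is relaxed from equality to a lower bound: a configuration built for fewer priority bits than the hardware implements is still considered valid. A tiny sketch with made-up numbers (4 implemented bits, a build configured for 3):

#include <assert.h>

int main( void )
{
    unsigned long ulImplementedPrioBits = 4;   /* as queried from the hardware (assumed) */
    unsigned long ulConfigPrioBits = 3;        /* __NVIC_PRIO_BITS / configPRIO_BITS in the build (assumed) */

    /* Old check - would fail whenever the macro understates the hardware:
     * assert( ulImplementedPrioBits == ulConfigPrioBits ); */

    /* New check - the macros now only set a minimum. */
    assert( ulImplementedPrioBits >= ulConfigPrioBits );

    return 0;
}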
+ */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/GCC/ARM_CM3/port.c b/portable/GCC/ARM_CM3/port.c index 4aa1f2425d7..9b42eac5a4e 100755 --- a/portable/GCC/ARM_CM3/port.c +++ b/portable/GCC/ARM_CM3/port.c @@ -330,19 +330,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c index e33df014c0e..619f2b0c8be 100755 --- a/portable/GCC/ARM_CM3_MPU/port.c +++ b/portable/GCC/ARM_CM3_MPU/port.c @@ -452,21 +452,25 @@ BaseType_t xPortStartScheduler( void ) } #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } + { + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); + } #endif #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } + { + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); + } #endif /* Shift the priority group value back to its position within the AIRCR diff --git a/portable/GCC/ARM_CM4F/port.c b/portable/GCC/ARM_CM4F/port.c index fd9e6dbb8f2..88fc76db894 100755 --- a/portable/GCC/ARM_CM4F/port.c +++ b/portable/GCC/ARM_CM4F/port.c @@ -373,19 +373,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. 
+ */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c index 1733fd82072..ab76ee84204 100755 --- a/portable/GCC/ARM_CM4_MPU/port.c +++ b/portable/GCC/ARM_CM4_MPU/port.c @@ -495,21 +495,25 @@ BaseType_t xPortStartScheduler( void ) } #ifdef __NVIC_PRIO_BITS - { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } + { + /* + * Check that the number of implemented priority bits queried + * from hardware is at least as many as specified in the + * CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); + } #endif #ifdef configPRIO_BITS - { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } + { + /* + * Check that the number of implemented priority bits queried + * from hardware is at least as many as specified in the + * FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); + } #endif /* Shift the priority group value back to its position within the AIRCR diff --git a/portable/GCC/ARM_CM7/r0p1/port.c b/portable/GCC/ARM_CM7/r0p1/port.c index 316dba13b8e..2be4f27704d 100755 --- a/portable/GCC/ARM_CM7/r0p1/port.c +++ b/portable/GCC/ARM_CM7/r0p1/port.c @@ -361,19 +361,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. 
+ */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/IAR/ARM_CM3/port.c b/portable/IAR/ARM_CM3/port.c index f1c78e46240..d54c3aceb4a 100755 --- a/portable/IAR/ARM_CM3/port.c +++ b/portable/IAR/ARM_CM3/port.c @@ -279,19 +279,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/IAR/ARM_CM4F/port.c b/portable/IAR/ARM_CM4F/port.c index 05d5be0aa65..e0deaf12840 100755 --- a/portable/IAR/ARM_CM4F/port.c +++ b/portable/IAR/ARM_CM4F/port.c @@ -317,19 +317,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c index 69b7bc5d9bc..1b7cca65c86 100755 --- a/portable/IAR/ARM_CM4F_MPU/port.c +++ b/portable/IAR/ARM_CM4F_MPU/port.c @@ -431,19 +431,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. 
*/ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/IAR/ARM_CM7/r0p1/port.c b/portable/IAR/ARM_CM7/r0p1/port.c index 9217653a7d4..63f83993db9 100755 --- a/portable/IAR/ARM_CM7/r0p1/port.c +++ b/portable/IAR/ARM_CM7/r0p1/port.c @@ -305,19 +305,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/MikroC/ARM_CM4F/port.c b/portable/MikroC/ARM_CM4F/port.c index 1936de19447..8ef593f5523 100755 --- a/portable/MikroC/ARM_CM4F/port.c +++ b/portable/MikroC/ARM_CM4F/port.c @@ -367,19 +367,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/RVDS/ARM_CM3/port.c b/portable/RVDS/ARM_CM3/port.c index 2ffdd9cc8bd..ae7ce37f37b 100755 --- a/portable/RVDS/ARM_CM3/port.c +++ b/portable/RVDS/ARM_CM3/port.c @@ -332,19 +332,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. 
+ */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/RVDS/ARM_CM4F/port.c b/portable/RVDS/ARM_CM4F/port.c index bf2fb86f76a..cb003aa38f9 100755 --- a/portable/RVDS/ARM_CM4F/port.c +++ b/portable/RVDS/ARM_CM4F/port.c @@ -398,19 +398,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c index e0bd8c86d0f..13e2f8a8ed1 100755 --- a/portable/RVDS/ARM_CM4_MPU/port.c +++ b/portable/RVDS/ARM_CM4_MPU/port.c @@ -491,19 +491,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif diff --git a/portable/RVDS/ARM_CM7/r0p1/port.c b/portable/RVDS/ARM_CM7/r0p1/port.c index 2e81e324df9..1df54ab2802 100755 --- a/portable/RVDS/ARM_CM7/r0p1/port.c +++ b/portable/RVDS/ARM_CM7/r0p1/port.c @@ -382,19 +382,23 @@ BaseType_t xPortStartScheduler( void ) #ifdef __NVIC_PRIO_BITS { - /* Check the CMSIS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. 
*/ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the CMSIS + * __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); } #endif #ifdef configPRIO_BITS { - /* Check the FreeRTOS configuration that defines the number of - * priority bits matches the number of priority bits actually queried - * from the hardware. */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + /* + * Check that the number of implemented priority bits queried from + * hardware is at least as many as specified in the FreeRTOS + * configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); } #endif From 714e5432472c7009f03c26c12ae37d62a0c5c2ca Mon Sep 17 00:00:00 2001 From: Paul Bartell Date: Tue, 18 Apr 2023 12:01:49 -0700 Subject: [PATCH 33/62] Format portmacro.h in arm CM0 ports --- portable/GCC/ARM_CM0/portmacro.h | 120 +++++++++++++++--------------- portable/IAR/ARM_CM0/portmacro.h | 114 ++++++++++++++-------------- portable/RVDS/ARM_CM0/portmacro.h | 100 +++++++++++++------------ 3 files changed, 169 insertions(+), 165 deletions(-) diff --git a/portable/GCC/ARM_CM0/portmacro.h b/portable/GCC/ARM_CM0/portmacro.h index 408162d6402..db69cfddf5b 100644 --- a/portable/GCC/ARM_CM0/portmacro.h +++ b/portable/GCC/ARM_CM0/portmacro.h @@ -28,11 +28,11 @@ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus +extern "C" { +#endif /*----------------------------------------------------------- * Port specific definitions. @@ -45,84 +45,86 @@ */ /* Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /* Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portDONT_DISCARD __attribute__( ( used ) ) - #define portNORETURN __attribute__( ( noreturn ) ) +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. */ - extern void vPortYield( void ); - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portYIELD() vPortYield() - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +extern void vPortYield( void ); +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portYIELD() vPortYield() +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /* Critical section management. */ - extern void vPortEnterCritical( void ); - extern void vPortExitCritical( void ); - extern uint32_t ulSetInterruptMaskFromISR( void ) __attribute__( ( naked ) ); - extern void vClearInterruptMaskFromISR( uint32_t ulMask ) __attribute__( ( naked ) ); - - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) - #define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) - #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +extern void vPortEnterCritical( void ); +extern void vPortExitCritical( void ); +extern uint32_t ulSetInterruptMaskFromISR( void ) __attribute__( ( naked ) ); +extern void vClearInterruptMaskFromISR( uint32_t ulMask ) __attribute__( ( naked ) ); + +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /* Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /* Task function macros as described on the FreeRTOS.org WEB site. 
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portNOP() +#define portNOP() - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus +} +#endif #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM0/portmacro.h b/portable/IAR/ARM_CM0/portmacro.h index ce1aa08fc9d..622b584c006 100644 --- a/portable/IAR/ARM_CM0/portmacro.h +++ b/portable/IAR/ARM_CM0/portmacro.h @@ -27,11 +27,11 @@ */ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +#ifdef __cplusplus +extern "C" { +#endif /*----------------------------------------------------------- * Port specific definitions. @@ -44,87 +44,87 @@ */ /* Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /* Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 /*-----------------------------------------------------------*/ /* Scheduler utilities. 
*/ - extern void vPortYield( void ); - #define portNVIC_INT_CTRL ( ( volatile uint32_t * ) 0xe000ed04 ) - #define portNVIC_PENDSVSET 0x10000000 - #define portYIELD() vPortYield() - #define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) *( portNVIC_INT_CTRL ) = portNVIC_PENDSVSET - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +extern void vPortYield( void ); +#define portNVIC_INT_CTRL ( ( volatile uint32_t * ) 0xe000ed04 ) +#define portNVIC_PENDSVSET 0x10000000 +#define portYIELD() vPortYield() +#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) *( portNVIC_INT_CTRL ) = portNVIC_PENDSVSET +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /* Critical section management. */ - extern void vPortEnterCritical( void ); - extern void vPortExitCritical( void ); - extern uint32_t ulSetInterruptMaskFromISR( void ); - extern void vClearInterruptMaskFromISR( uint32_t ulMask ); +extern void vPortEnterCritical( void ); +extern void vPortExitCritical( void ); +extern uint32_t ulSetInterruptMaskFromISR( void ); +extern void vClearInterruptMaskFromISR( uint32_t ulMask ); - #define portDISABLE_INTERRUPTS() __asm volatile ( "cpsid i" ) - #define portENABLE_INTERRUPTS() __asm volatile ( "cpsie i" ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) +#define portDISABLE_INTERRUPTS() __asm volatile ( "cpsid i" ) +#define portENABLE_INTERRUPTS() __asm volatile ( "cpsie i" ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) /*-----------------------------------------------------------*/ /* Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /* Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portNOP() +#define portNOP() /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in * the source code because to do so would cause other compilers to generate * warnings. 
*/ - #pragma diag_suppress=Pa082 +#pragma diag_suppress=Pa082 - #ifdef __cplusplus - } - #endif +#ifdef __cplusplus +} +#endif #endif /* PORTMACRO_H */ diff --git a/portable/RVDS/ARM_CM0/portmacro.h b/portable/RVDS/ARM_CM0/portmacro.h index 54165e74cc4..fb1eea7cad0 100644 --- a/portable/RVDS/ARM_CM0/portmacro.h +++ b/portable/RVDS/ARM_CM0/portmacro.h @@ -47,76 +47,78 @@ */ /* Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /* Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 /*-----------------------------------------------------------*/ /* Scheduler utilities. */ - extern void vPortYield( void ); - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portYIELD() vPortYield() - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +extern void vPortYield( void ); +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portYIELD() vPortYield() +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /* Critical section management. 
*/ - extern void vPortEnterCritical( void ); - extern void vPortExitCritical( void ); - extern uint32_t ulSetInterruptMaskFromISR( void ); - extern void vClearInterruptMaskFromISR( uint32_t ulMask ); - - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) - #define portDISABLE_INTERRUPTS() __disable_irq() - #define portENABLE_INTERRUPTS() __enable_irq() - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +extern void vPortEnterCritical( void ); +extern void vPortExitCritical( void ); +extern uint32_t ulSetInterruptMaskFromISR( void ); +extern void vClearInterruptMaskFromISR( uint32_t ulMask ); + +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMaskFromISR() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMaskFromISR( x ) +#define portDISABLE_INTERRUPTS() __disable_irq() +#define portENABLE_INTERRUPTS() __enable_irq() +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /* Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /* Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portNOP() +#define portNOP() /* *INDENT-OFF* */ #ifdef __cplusplus From 5f19e34f878af97810a7662a75eac59bd74d628b Mon Sep 17 00:00:00 2001 From: Paul Bartell Date: Tue, 18 Apr 2023 14:43:29 -0700 Subject: [PATCH 34/62] portable/ARM_CM0: Add xPortIsInsideInterrupt Add missing xPortIsInsideInterrupt function to Cortex-M0 port. --- portable/GCC/ARM_CM0/portmacro.h | 40 +++++++++++++++++++++++++++++-- portable/IAR/ARM_CM0/portmacro.h | 39 ++++++++++++++++++++++++++++-- portable/RVDS/ARM_CM0/portmacro.h | 35 +++++++++++++++++++++++++++ 3 files changed, 110 insertions(+), 4 deletions(-) diff --git a/portable/GCC/ARM_CM0/portmacro.h b/portable/GCC/ARM_CM0/portmacro.h index db69cfddf5b..b9e9ef6623f 100644 --- a/portable/GCC/ARM_CM0/portmacro.h +++ b/portable/GCC/ARM_CM0/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -123,8 +125,42 @@ extern void vClearInterruptMaskFromISR( uint32_t ulMask ) __attribute__( ( nake #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) -#ifdef __cplusplus + +#define portINLINE __inline + +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif + +/*-----------------------------------------------------------*/ + +portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; } + +/*-----------------------------------------------------------*/ + + +/* *INDENT-OFF* */ +#ifdef __cplusplus + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM0/portmacro.h b/portable/IAR/ARM_CM0/portmacro.h index 622b584c006..5dcc949b228 100644 --- a/portable/IAR/ARM_CM0/portmacro.h +++ b/portable/IAR/ARM_CM0/portmacro.h @@ -26,12 +26,15 @@ * */ + #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -118,13 +121,45 @@ extern void vClearInterruptMaskFromISR( uint32_t ulMask ); #define portNOP() +#define portINLINE __inline + +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif + +/*-----------------------------------------------------------*/ + +portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} + +/*-----------------------------------------------------------*/ + /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in * the source code because to do so would cause other compilers to generate * warnings. */ #pragma diag_suppress=Pa082 +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/RVDS/ARM_CM0/portmacro.h b/portable/RVDS/ARM_CM0/portmacro.h index fb1eea7cad0..4a1ea8a7b3a 100644 --- a/portable/RVDS/ARM_CM0/portmacro.h +++ b/portable/RVDS/ARM_CM0/portmacro.h @@ -120,6 +120,41 @@ extern void vClearInterruptMaskFromISR( uint32_t ulMask ); #define portNOP() +#define portINLINE __inline + +#ifndef portFORCE_INLINE + #define portFORCE_INLINE __forceinline +#endif + +/*-----------------------------------------------------------*/ + +static portFORCE_INLINE BaseType_t xPortIsInsideInterrupt( void ) +{ + uint32_t ulCurrentInterrupt; + BaseType_t xReturn; + + /* Obtain the number of the currently executing interrupt. 
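 *
 * (Editor's illustration, not part of the original patch: on Cortex-M cores
 * the IPSR register reads as zero in Thread mode and holds the active
 * exception number in Handler mode, so a zero value below means the caller
 * is not running from an interrupt. A typical use, sketched here with
 * placeholder names xSemaphore and xHigherPriorityTaskWoken, is selecting
 * between an API and its FromISR variant:
 *
 *     if( xPortIsInsideInterrupt() == pdTRUE )
 *     {
 *         xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken );
 *     }
 *     else
 *     {
 *         xSemaphoreGive( xSemaphore );
 *     }
 * )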
*/ + __asm + { +/* *INDENT-OFF* */ + mrs ulCurrentInterrupt, ipsr +/* *INDENT-ON* */ + } + + if( ulCurrentInterrupt == 0 ) + { + xReturn = pdFALSE; + } + else + { + xReturn = pdTRUE; + } + + return xReturn; +} + +/*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #ifdef __cplusplus } From c1980cedb110e3826fbf22d93fccaa2ca3d9daf8 Mon Sep 17 00:00:00 2001 From: Paul Bartell Date: Thu, 20 Apr 2023 12:46:00 -0700 Subject: [PATCH 35/62] tree-wide: Unify formatting of __cplusplus ifdefs --- .../portable/GCC/ARM_CM23/portmacro.h | 4 ++++ .../portable/GCC/ARM_CM23_NTZ/portmacro.h | 4 ++++ .../portable/GCC/ARM_CM33/portmacro.h | 4 ++++ .../portable/GCC/ARM_CM33_NTZ/portmacro.h | 4 ++++ .../portable/GCC/ARM_CM35P/portmacro.h | 4 ++++ .../portable/GCC/ARM_CM55/portmacro.h | 4 ++++ .../portable/GCC/ARM_CM85/portmacro.h | 4 ++++ .../portable/IAR/ARM_CM23/portmacro.h | 4 ++++ .../portable/IAR/ARM_CM23_NTZ/portmacro.h | 4 ++++ .../portable/IAR/ARM_CM33/portmacro.h | 4 ++++ .../portable/IAR/ARM_CM33_NTZ/portmacro.h | 4 ++++ .../portable/IAR/ARM_CM35P/portmacro.h | 4 ++++ .../portable/IAR/ARM_CM55/portmacro.h | 4 ++++ .../portable/IAR/ARM_CM85/portmacro.h | 4 ++++ portable/ARMv8M/non_secure/portmacrocommon.h | 16 ++++++++------ portable/CCS/ARM_CM3/portmacro.h | 16 ++++++++------ portable/CCS/ARM_CM4F/portmacro.h | 16 ++++++++------ portable/CodeWarrior/ColdFire_V1/portmacro.h | 8 +++++-- portable/CodeWarrior/ColdFire_V2/portmacro.h | 8 +++++-- portable/GCC/ARM7_AT91FR40008/portmacro.h | 8 +++++-- portable/GCC/ARM7_AT91SAM7S/portmacro.h | 8 +++++-- portable/GCC/ARM7_LPC2000/portmacro.h | 8 +++++-- portable/GCC/ARM7_LPC23xx/portmacro.h | 8 +++++-- portable/GCC/ARM_CA53_64_BIT/portmacro.h | 13 +++++++----- portable/GCC/ARM_CA53_64_BIT_SRE/portmacro.h | 13 +++++++----- portable/GCC/ARM_CA9/portmacro.h | 13 +++++++----- portable/GCC/ARM_CM0/portmacro.h | 1 - portable/GCC/ARM_CM23/non_secure/portmacro.h | 4 ++++ .../GCC/ARM_CM23/non_secure/portmacrocommon.h | 16 ++++++++------ .../GCC/ARM_CM23_NTZ/non_secure/portmacro.h | 4 ++++ .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 16 ++++++++------ portable/GCC/ARM_CM3/portmacro.h | 16 ++++++++------ portable/GCC/ARM_CM33/non_secure/portmacro.h | 4 ++++ .../GCC/ARM_CM33/non_secure/portmacrocommon.h | 16 ++++++++------ .../GCC/ARM_CM33_NTZ/non_secure/portmacro.h | 4 ++++ .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 16 ++++++++------ portable/GCC/ARM_CM35P/non_secure/portmacro.h | 4 ++++ .../ARM_CM35P/non_secure/portmacrocommon.h | 16 ++++++++------ .../GCC/ARM_CM35P_NTZ/non_secure/portmacro.h | 4 ++++ .../non_secure/portmacrocommon.h | 16 ++++++++------ portable/GCC/ARM_CM3_MPU/portmacro.h | 21 ++++++++++++------- portable/GCC/ARM_CM4F/portmacro.h | 16 ++++++++------ portable/GCC/ARM_CM55/non_secure/portmacro.h | 4 ++++ .../GCC/ARM_CM55/non_secure/portmacrocommon.h | 16 ++++++++------ .../GCC/ARM_CM55_NTZ/non_secure/portmacro.h | 4 ++++ .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 16 ++++++++------ portable/GCC/ARM_CM7/r0p1/portmacro.h | 16 ++++++++------ portable/GCC/ARM_CM85/non_secure/portmacro.h | 4 ++++ .../GCC/ARM_CM85/non_secure/portmacrocommon.h | 16 ++++++++------ .../GCC/ARM_CM85_NTZ/non_secure/portmacro.h | 4 ++++ .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 16 ++++++++------ portable/GCC/ARM_CRx_No_GIC/portmacro.h | 7 +++++-- portable/GCC/ATMega323/portmacro.h | 8 +++++-- portable/GCC/AVR32_UC3/portmacro.h | 8 +++++-- portable/GCC/CORTUS_APS3/portmacro.h | 8 +++++-- portable/GCC/ColdFire_V2/portmacro.h | 8 
+++++-- portable/GCC/H8S2329/portmacro.h | 8 +++++-- portable/GCC/HCS12/portmacro.h | 8 +++++-- portable/GCC/IA32_flat/portmacro.h | 6 +++++- portable/GCC/MSP430F449/portmacro.h | 8 +++++-- portable/GCC/MicroBlaze/portmacro.h | 8 +++++-- portable/GCC/MicroBlazeV8/portmacro.h | 8 +++++-- portable/GCC/MicroBlazeV9/portmacro.h | 8 +++++-- portable/GCC/NiosII/portmacro.h | 8 +++++-- portable/GCC/PPC405_Xilinx/portmacro.h | 8 +++++-- portable/GCC/PPC440_Xilinx/portmacro.h | 8 +++++-- portable/GCC/RISC-V/portmacro.h | 8 +++++-- portable/GCC/RX100/portmacro.h | 8 +++++-- portable/GCC/RX200/portmacro.h | 8 +++++-- portable/GCC/RX600/portmacro.h | 8 +++++-- portable/GCC/RX600v2/portmacro.h | 8 +++++-- portable/GCC/RX700v3_DPFPU/portmacro.h | 16 ++++++++------ portable/GCC/STR75x/portmacro.h | 8 +++++-- portable/GCC/TriCore_1782/portmacro.h | 8 +++++-- portable/IAR/78K0R/portmacro.h | 8 +++++-- portable/IAR/ARM_CA5_No_GIC/portmacro.h | 21 ++++++++++++------- portable/IAR/ARM_CA9/portmacro.h | 21 +++++++++++-------- portable/IAR/ARM_CM23/non_secure/portmacro.h | 4 ++++ .../IAR/ARM_CM23/non_secure/portmacrocommon.h | 16 ++++++++------ .../IAR/ARM_CM23_NTZ/non_secure/portmacro.h | 4 ++++ .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 16 ++++++++------ portable/IAR/ARM_CM3/portmacro.h | 16 ++++++++------ portable/IAR/ARM_CM33/non_secure/portmacro.h | 4 ++++ .../IAR/ARM_CM33/non_secure/portmacrocommon.h | 16 ++++++++------ .../IAR/ARM_CM33_NTZ/non_secure/portmacro.h | 4 ++++ .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 16 ++++++++------ portable/IAR/ARM_CM35P/non_secure/portmacro.h | 4 ++++ .../ARM_CM35P/non_secure/portmacrocommon.h | 16 ++++++++------ .../IAR/ARM_CM35P_NTZ/non_secure/portmacro.h | 4 ++++ .../non_secure/portmacrocommon.h | 16 ++++++++------ portable/IAR/ARM_CM4F/portmacro.h | 16 ++++++++------ portable/IAR/ARM_CM55/non_secure/portmacro.h | 4 ++++ .../IAR/ARM_CM55/non_secure/portmacrocommon.h | 16 ++++++++------ .../IAR/ARM_CM55_NTZ/non_secure/portmacro.h | 4 ++++ .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 16 ++++++++------ portable/IAR/ARM_CM7/r0p1/portmacro.h | 16 ++++++++------ portable/IAR/ARM_CM85/non_secure/portmacro.h | 4 ++++ .../IAR/ARM_CM85/non_secure/portmacrocommon.h | 16 ++++++++------ .../IAR/ARM_CM85_NTZ/non_secure/portmacro.h | 4 ++++ .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 16 ++++++++------ portable/IAR/ARM_CRx_No_GIC/portmacro.h | 7 +++++-- portable/IAR/ATMega323/portmacro.h | 8 +++++-- portable/IAR/AVR32_UC3/portmacro.h | 8 +++++-- portable/IAR/AtmelSAM7S64/portmacro.h | 8 +++++-- portable/IAR/AtmelSAM9XE/portmacro.h | 8 +++++-- portable/IAR/LPC2000/portmacro.h | 8 +++++-- portable/IAR/RISC-V/portmacro.h | 8 +++++-- portable/IAR/RL78/portmacro.h | 10 ++++++--- portable/IAR/RX100/portmacro.h | 8 +++++-- portable/IAR/RX600/portmacro.h | 8 +++++-- portable/IAR/RX700v3_DPFPU/portmacro.h | 16 ++++++++------ portable/IAR/RXv2/portmacro.h | 8 +++++-- portable/IAR/STR71x/portmacro.h | 8 +++++-- portable/IAR/STR75x/portmacro.h | 8 +++++-- portable/IAR/STR91x/portmacro.h | 8 +++++-- portable/IAR/V850ES/portmacro.h | 8 +++++-- portable/MPLAB/PIC24_dsPIC/portmacro.h | 8 +++++-- portable/MPLAB/PIC32MEC14xx/portmacro.h | 8 +++++-- portable/MPLAB/PIC32MX/portmacro.h | 8 +++++-- portable/MPLAB/PIC32MZ/portmacro.h | 8 +++++-- portable/MikroC/ARM_CM4F/portmacro.h | 16 ++++++++------ .../Tern_EE/large_untested/portmacro.h | 8 +++++-- portable/Paradigm/Tern_EE/small/portmacro.h | 8 +++++-- portable/RVDS/ARM_CA9/portmacro.h | 8 +++++-- portable/Renesas/RX100/portmacro.h | 8 
+++++-- portable/Renesas/RX200/portmacro.h | 8 +++++-- portable/Renesas/RX600/portmacro.h | 8 +++++-- portable/Renesas/RX600v2/portmacro.h | 8 +++++-- portable/Renesas/RX700v3_DPFPU/portmacro.h | 16 ++++++++------ portable/Renesas/SH2A_FPU/portmacro.h | 8 +++++-- portable/Tasking/ARM_CM4F/portmacro.h | 16 ++++++++------ .../ThirdParty/CDK/T-HEAD_CK802/portmacro.h | 11 +++++++--- portable/ThirdParty/GCC/ARC_v1/portmacro.h | 16 ++++++++------ portable/ThirdParty/GCC/ATmega/portmacro.h | 8 +++++-- portable/ThirdParty/GCC/Posix/portmacro.h | 8 +++++-- .../ThirdParty/GCC/RP2040/include/portmacro.h | 16 ++++++++------ .../GCC/RP2040/include/rp2040_config.h | 8 +++++-- .../GCC/Xtensa_ESP32/include/port_systick.h | 8 +++++-- .../GCC/Xtensa_ESP32/include/xtensa_config.h | 17 ++++++++------- portable/ThirdParty/XCC/Xtensa/portmacro.h | 8 +++++-- .../ThirdParty/XCC/Xtensa/xtensa_config.h | 8 +++++-- portable/oWatcom/16BitDOS/Flsh186/portmacro.h | 8 +++++-- portable/oWatcom/16BitDOS/PC/portmacro.h | 8 +++++-- 143 files changed, 958 insertions(+), 395 deletions(-) diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h index c6dad99857c..6a2ed748524 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -65,8 +67,10 @@ #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h index c6dad99857c..6a2ed748524 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -65,8 +67,10 @@ #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h index 4fe8c59147a..27da496744d 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -60,8 +62,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h index 4fe8c59147a..27da496744d 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h +++ 
b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -60,8 +62,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h index 33bfb283461..dfc8365305e 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -60,8 +62,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h index adb47d8420f..3cde19a73d5 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -65,8 +67,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h index fec6923394c..0fd11fcdf3a 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -65,8 +67,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h index d845ac1caa0..b4d11dfdbcf 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -71,8 +73,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h index d845ac1caa0..b4d11dfdbcf 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h +++ 
b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -71,8 +73,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h index b6df20eb88f..cbfa60241fa 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -71,8 +73,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h index b6df20eb88f..cbfa60241fa 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -71,8 +73,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h index a0efc1f9dcf..70b73a31794 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -71,8 +73,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h index a3b510e282c..7783218fe0b 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -76,8 +78,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h index cfaae813eac..7aba706018e 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* 
*/ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -76,8 +78,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ARMv8M/non_secure/portmacrocommon.h b/portable/ARMv8M/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/ARMv8M/non_secure/portmacrocommon.h +++ b/portable/ARMv8M/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/CCS/ARM_CM3/portmacro.h b/portable/CCS/ARM_CM3/portmacro.h index 7ac1d709caf..9c405d4c87f 100644 --- a/portable/CCS/ARM_CM3/portmacro.h +++ b/portable/CCS/ARM_CM3/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -166,8 +168,10 @@ /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/CCS/ARM_CM4F/portmacro.h b/portable/CCS/ARM_CM4F/portmacro.h index 34988c223f5..64c702c941c 100644 --- a/portable/CCS/ARM_CM4F/portmacro.h +++ b/portable/CCS/ARM_CM4F/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -160,8 +162,10 @@ /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/CodeWarrior/ColdFire_V1/portmacro.h b/portable/CodeWarrior/ColdFire_V1/portmacro.h index edae69ab2b2..8acfcd04fdd 100644 --- a/portable/CodeWarrior/ColdFire_V1/portmacro.h +++ b/portable/CodeWarrior/ColdFire_V1/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -107,8 +109,10 @@ extern void vPortClearInterruptMaskFromISR( UBaseType_t ); #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired != pdFALSE ) { portYIELD(); } } while( 0 ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/CodeWarrior/ColdFire_V2/portmacro.h b/portable/CodeWarrior/ColdFire_V2/portmacro.h index 665ae4b304c..053b0adecb6 100644 --- a/portable/CodeWarrior/ColdFire_V2/portmacro.h +++ b/portable/CodeWarrior/ColdFire_V2/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -106,8 +108,10 @@ extern void vPortClearInterruptMaskFromISR( UBaseType_t ); #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired != pdFALSE ) { portYIELD(); } } while( 0 ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM7_AT91FR40008/portmacro.h b/portable/GCC/ARM7_AT91FR40008/portmacro.h index 69ee70f078c..201f3c7a795 100644 --- a/portable/GCC/ARM7_AT91FR40008/portmacro.h +++ b/portable/GCC/ARM7_AT91FR40008/portmacro.h @@ -52,9 +52,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -250,8 +252,10 @@ extern void vPortExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM7_AT91SAM7S/portmacro.h b/portable/GCC/ARM7_AT91SAM7S/portmacro.h index 1a440a68c5f..b52fbe0aa45 100644 --- a/portable/GCC/ARM7_AT91SAM7S/portmacro.h +++ b/portable/GCC/ARM7_AT91SAM7S/portmacro.h @@ -52,9 +52,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -244,8 +246,10 @@ extern void vPortExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM7_LPC2000/portmacro.h b/portable/GCC/ARM7_LPC2000/portmacro.h index 50922065441..6545e1127c4 100644 --- a/portable/GCC/ARM7_LPC2000/portmacro.h +++ b/portable/GCC/ARM7_LPC2000/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -221,8 +223,10 @@ extern void vPortExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM7_LPC23xx/portmacro.h b/portable/GCC/ARM7_LPC23xx/portmacro.h index 768e86dbed1..f60cdde5331 100644 --- a/portable/GCC/ARM7_LPC23xx/portmacro.h +++ b/portable/GCC/ARM7_LPC23xx/portmacro.h @@ -52,9 +52,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -244,8 +246,10 @@ extern void vPortExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CA53_64_BIT/portmacro.h b/portable/GCC/ARM_CA53_64_BIT/portmacro.h index 1b2e277cf05..00fc061644b 100644 --- a/portable/GCC/ARM_CA53_64_BIT/portmacro.h +++ b/portable/GCC/ARM_CA53_64_BIT/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -165,11 +167,6 @@ void vPortTaskUsesFPU( void ); #define portNOP() __asm volatile( "NOP" ) #define portINLINE __inline -#ifdef __cplusplus - } /* extern C */ -#endif - - /* The number of bits to shift for an interrupt priority is dependent on the number of bits implemented by the interrupt controller. */ #if configUNIQUE_INTERRUPT_PRIORITIES == 16 @@ -208,4 +205,10 @@ number of bits implemented by the interrupt controller. */ #define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" ) +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CA53_64_BIT_SRE/portmacro.h b/portable/GCC/ARM_CA53_64_BIT_SRE/portmacro.h index d8d911f4f75..0199288824c 100644 --- a/portable/GCC/ARM_CA53_64_BIT_SRE/portmacro.h +++ b/portable/GCC/ARM_CA53_64_BIT_SRE/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -165,11 +167,6 @@ void vPortTaskUsesFPU( void ); #define portNOP() __asm volatile( "NOP" ) #define portINLINE __inline -#ifdef __cplusplus - } /* extern C */ -#endif - - /* The number of bits to shift for an interrupt priority is dependent on the number of bits implemented by the interrupt controller. */ #if configUNIQUE_INTERRUPT_PRIORITIES == 16 @@ -193,4 +190,10 @@ number of bits implemented by the interrupt controller. 
*/ #define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" ) +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CA9/portmacro.h b/portable/GCC/ARM_CA9/portmacro.h index d97fb535f71..6f1f2272456 100644 --- a/portable/GCC/ARM_CA9/portmacro.h +++ b/portable/GCC/ARM_CA9/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -162,11 +164,6 @@ by default. */ #define portNOP() __asm volatile( "NOP" ) #define portINLINE __inline -#ifdef __cplusplus - } /* extern C */ -#endif - - /* The number of bits to shift for an interrupt priority is dependent on the number of bits implemented by the interrupt controller. */ #if configUNIQUE_INTERRUPT_PRIORITIES == 16 @@ -205,4 +202,10 @@ number of bits implemented by the interrupt controller. */ #define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" ) +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM0/portmacro.h b/portable/GCC/ARM_CM0/portmacro.h index b9e9ef6623f..dc7f54578e2 100644 --- a/portable/GCC/ARM_CM0/portmacro.h +++ b/portable/GCC/ARM_CM0/portmacro.h @@ -156,7 +156,6 @@ portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void ) /*-----------------------------------------------------------*/ - /* *INDENT-OFF* */ #ifdef __cplusplus } diff --git a/portable/GCC/ARM_CM23/non_secure/portmacro.h b/portable/GCC/ARM_CM23/non_secure/portmacro.h index c6dad99857c..6a2ed748524 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -65,8 +67,10 @@ #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. 
@@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h index c6dad99857c..6a2ed748524 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -65,8 +67,10 @@ #define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM3/portmacro.h b/portable/GCC/ARM_CM3/portmacro.h index dd729f13f2c..36e38f75d21 100644 --- a/portable/GCC/ARM_CM3/portmacro.h +++ b/portable/GCC/ARM_CM3/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -243,8 +245,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacro.h b/portable/GCC/ARM_CM33/non_secure/portmacro.h index 4fe8c59147a..27da496744d 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -60,8 +62,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h index 4fe8c59147a..27da496744d 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -60,8 +62,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. 
@@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacro.h b/portable/GCC/ARM_CM35P/non_secure/portmacro.h index 33bfb283461..dfc8365305e 100644 --- a/portable/GCC/ARM_CM35P/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM35P/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -60,8 +62,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h index 33bfb283461..dfc8365305e 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -60,8 +62,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. 
@@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM3_MPU/portmacro.h b/portable/GCC/ARM_CM3_MPU/portmacro.h index 25058026973..b15193f1ee5 100644 --- a/portable/GCC/ARM_CM3_MPU/portmacro.h +++ b/portable/GCC/ARM_CM3_MPU/portmacro.h @@ -28,11 +28,13 @@ #ifndef PORTMACRO_H - #define PORTMACRO_H +#define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -301,12 +303,15 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) #ifndef configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY - #warning "configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY is not defined. We recommend defining it to 1 in FreeRTOSConfig.h for better security. https://www.FreeRTOS.org/FreeRTOS-V10.3.x.html" + #warning "configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY is not defined. We recommend defining it to 1 in FreeRTOSConfig.h for better security. *www.FreeRTOS.org/FreeRTOS-V10.3.x.html" #define configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY 0 #endif /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif + +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM4F/portmacro.h b/portable/GCC/ARM_CM4F/portmacro.h index a3b2b46c989..443661a6d3a 100644 --- a/portable/GCC/ARM_CM4F/portmacro.h +++ b/portable/GCC/ARM_CM4F/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -244,8 +246,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacro.h b/portable/GCC/ARM_CM55/non_secure/portmacro.h index adb47d8420f..3cde19a73d5 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -65,8 +67,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. 
@@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h index adb47d8420f..3cde19a73d5 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -65,8 +67,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM7/r0p1/portmacro.h b/portable/GCC/ARM_CM7/r0p1/portmacro.h index 897fef5f468..82529f998ad 100644 --- a/portable/GCC/ARM_CM7/r0p1/portmacro.h +++ b/portable/GCC/ARM_CM7/r0p1/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -245,8 +247,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacro.h b/portable/GCC/ARM_CM85/non_secure/portmacro.h index fec6923394c..0fd11fcdf3a 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -65,8 +67,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h index fec6923394c..0fd11fcdf3a 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -65,8 +67,10 @@ #define portENABLE_INTERRUPTS() vClearInterruptMask( 0 ) /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. 
@@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/GCC/ARM_CRx_No_GIC/portmacro.h b/portable/GCC/ARM_CRx_No_GIC/portmacro.h index be5c6121e40..e8e54103439 100644 --- a/portable/GCC/ARM_CRx_No_GIC/portmacro.h +++ b/portable/GCC/ARM_CRx_No_GIC/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -173,9 +175,10 @@ void vPortTaskUsesFPU( void ); #define portMEMORY_BARRIER() __asm volatile( "" ::: "memory" ) +/* *INDENT-OFF* */ #ifdef __cplusplus - } /* extern C */ + } #endif - +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ATMega323/portmacro.h b/portable/GCC/ATMega323/portmacro.h index c22706de5d4..a9f317e6d65 100644 --- a/portable/GCC/ATMega323/portmacro.h +++ b/portable/GCC/ATMega323/portmacro.h @@ -36,9 +36,11 @@ Changes from V1.2.3 #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -104,8 +106,10 @@ extern void vPortYield( void ) __attribute__ ( ( naked ) ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/AVR32_UC3/portmacro.h b/portable/GCC/AVR32_UC3/portmacro.h index 86014a135b5..5bc7c8b0877 100644 --- a/portable/GCC/AVR32_UC3/portmacro.h +++ b/portable/GCC/AVR32_UC3/portmacro.h @@ -85,9 +85,11 @@ #include "intc.h" #include "compiler.h" +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Type definitions. 
*/ @@ -691,8 +693,10 @@ extern void *pvPortRealloc( void *pv, size_t xSize ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/CORTUS_APS3/portmacro.h b/portable/GCC/CORTUS_APS3/portmacro.h index 86000665681..37e739bb55f 100644 --- a/portable/GCC/CORTUS_APS3/portmacro.h +++ b/portable/GCC/CORTUS_APS3/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ #include @@ -148,8 +150,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) /*---------------------------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/ColdFire_V2/portmacro.h b/portable/GCC/ColdFire_V2/portmacro.h index 9b4c1da8833..14d318b7b94 100644 --- a/portable/GCC/ColdFire_V2/portmacro.h +++ b/portable/GCC/ColdFire_V2/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -106,8 +108,10 @@ extern void vPortClearInterruptMaskFromISR( UBaseType_t ); #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired != pdFALSE ) { portYIELD(); } } while( 0 ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/H8S2329/portmacro.h b/portable/GCC/H8S2329/portmacro.h index 4c87abd163c..829ff9f9b64 100644 --- a/portable/GCC/H8S2329/portmacro.h +++ b/portable/GCC/H8S2329/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -133,8 +135,10 @@ extern void* pxCurrentTCB; \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/HCS12/portmacro.h b/portable/GCC/HCS12/portmacro.h index 9202510ccc8..a864419899f 100644 --- a/portable/GCC/HCS12/portmacro.h +++ b/portable/GCC/HCS12/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -241,8 +243,10 @@ typedef unsigned char UBaseType_t; #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/IA32_flat/portmacro.h b/portable/GCC/IA32_flat/portmacro.h index 060eb4daa69..3a0cd287c8e 100644 --- a/portable/GCC/IA32_flat/portmacro.h +++ b/portable/GCC/IA32_flat/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -284,8 +286,10 @@ above the max system call interrupt priority. */ #define portAPIC_PROCESSOR_PRIORITY ( *( ( volatile uint32_t * ) ( configAPIC_BASE + 0xA0UL ) ) ) #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() configASSERT( ( portAPIC_PROCESSOR_PRIORITY ) <= ( portMAX_API_CALL_PRIORITY ) ) +/* *INDENT-OFF* */ #ifdef __cplusplus - } /* extern C */ + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/MSP430F449/portmacro.h b/portable/GCC/MSP430F449/portmacro.h index 8d6827a57e9..5a6e1db9c94 100644 --- a/portable/GCC/MSP430F449/portmacro.h +++ b/portable/GCC/MSP430F449/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -122,8 +124,10 @@ extern void vPortYield( void ) __attribute__ ( ( naked ) ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/MicroBlaze/portmacro.h b/portable/GCC/MicroBlaze/portmacro.h index c646496ab3f..b04c526a42d 100644 --- a/portable/GCC/MicroBlaze/portmacro.h +++ b/portable/GCC/MicroBlaze/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -122,8 +124,10 @@ void vTaskSwitchContext(); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/MicroBlazeV8/portmacro.h b/portable/GCC/MicroBlazeV8/portmacro.h index be9dfacc42f..3ade98795df 100644 --- a/portable/GCC/MicroBlazeV8/portmacro.h +++ b/portable/GCC/MicroBlazeV8/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* BSP includes. 
*/ #include @@ -365,8 +367,10 @@ void vPortExceptionsInstallHandlers( void ); void vApplicationExceptionRegisterDump( xPortRegisterDump *xRegisterDump ); +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/MicroBlazeV9/portmacro.h b/portable/GCC/MicroBlazeV9/portmacro.h index c26daa79fc2..9d10b5e5beb 100644 --- a/portable/GCC/MicroBlazeV9/portmacro.h +++ b/portable/GCC/MicroBlazeV9/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* BSP includes. */ #include @@ -370,8 +372,10 @@ void vPortExceptionsInstallHandlers( void ); void vApplicationExceptionRegisterDump( xPortRegisterDump *xRegisterDump ); +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/NiosII/portmacro.h b/portable/GCC/NiosII/portmacro.h index 7a159925db2..3ac552dd8fa 100644 --- a/portable/GCC/NiosII/portmacro.h +++ b/portable/GCC/NiosII/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ #include "sys/alt_irq.h" @@ -104,8 +106,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/PPC405_Xilinx/portmacro.h b/portable/GCC/PPC405_Xilinx/portmacro.h index d7f32944c40..e6f1c06c845 100644 --- a/portable/GCC/PPC405_Xilinx/portmacro.h +++ b/portable/GCC/PPC405_Xilinx/portmacro.h @@ -31,9 +31,11 @@ #include "xexception_l.h" +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -113,8 +115,10 @@ void vPortYield( void ); void vPortSetupInterruptController( void ); BaseType_t xPortInstallInterruptHandler( uint8_t ucInterruptID, XInterruptHandler pxHandler, void *pvCallBackRef ); +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/PPC440_Xilinx/portmacro.h b/portable/GCC/PPC440_Xilinx/portmacro.h index d7f32944c40..e6f1c06c845 100644 --- a/portable/GCC/PPC440_Xilinx/portmacro.h +++ b/portable/GCC/PPC440_Xilinx/portmacro.h @@ -31,9 +31,11 @@ #include "xexception_l.h" +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -113,8 +115,10 @@ void vPortYield( void ); void vPortSetupInterruptController( void ); BaseType_t xPortInstallInterruptHandler( uint8_t ucInterruptID, XInterruptHandler pxHandler, void *pvCallBackRef ); +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/RISC-V/portmacro.h b/portable/GCC/RISC-V/portmacro.h index 1e72b1af547..727273aca5c 100644 --- a/portable/GCC/RISC-V/portmacro.h +++ b/portable/GCC/RISC-V/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -184,8 +186,10 @@ extern size_t xCriticalNesting; #error configMTIME_BASE_ADDRESS and configMTIMECMP_BASE_ADDRESS must be defined in FreeRTOSConfig.h. Set them to zero if there is no MTIME (machine time) clock. See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html #endif +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/RX100/portmacro.h b/portable/GCC/RX100/portmacro.h index 863596685aa..4556e147a5e 100644 --- a/portable/GCC/RX100/portmacro.h +++ b/portable/GCC/RX100/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -144,8 +146,10 @@ void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/RX200/portmacro.h b/portable/GCC/RX200/portmacro.h index 80fc8c8041d..40682825dd1 100644 --- a/portable/GCC/RX200/portmacro.h +++ b/portable/GCC/RX200/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -139,8 +141,10 @@ void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/RX600/portmacro.h b/portable/GCC/RX600/portmacro.h index 7e1fd1e0c92..7e7bbe88b3c 100644 --- a/portable/GCC/RX600/portmacro.h +++ b/portable/GCC/RX600/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -139,8 +141,10 @@ void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/RX600v2/portmacro.h b/portable/GCC/RX600v2/portmacro.h index 7e1fd1e0c92..7e7bbe88b3c 100644 --- a/portable/GCC/RX600v2/portmacro.h +++ b/portable/GCC/RX600v2/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -139,8 +141,10 @@ void vPortSetIPL( uint32_t ulNewIPL ) __attribute__((naked)); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/RX700v3_DPFPU/portmacro.h b/portable/GCC/RX700v3_DPFPU/portmacro.h index 6b76374c882..881610515ee 100644 --- a/portable/GCC/RX700v3_DPFPU/portmacro.h +++ b/portable/GCC/RX700v3_DPFPU/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -182,8 +184,10 @@ /* Definition to allow compatibility with existing FreeRTOS Demo using flop.c. */ #define portTASK_USES_FLOATING_POINT() vPortTaskUsesDPFPU() - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/STR75x/portmacro.h b/portable/GCC/STR75x/portmacro.h index 85262dac4d5..8d5e832c59e 100644 --- a/portable/GCC/STR75x/portmacro.h +++ b/portable/GCC/STR75x/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -135,8 +137,10 @@ extern void vTaskSwitchContext( void ); \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/GCC/TriCore_1782/portmacro.h b/portable/GCC/TriCore_1782/portmacro.h index 734abc5949c..d17c7bc6882 100644 --- a/portable/GCC/TriCore_1782/portmacro.h +++ b/portable/GCC/TriCore_1782/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* System Includes. */ #include @@ -169,8 +171,10 @@ extern uint32_t uxPortSetInterruptMaskFromISR( void ); void vPortReclaimCSA( uint32_t *pxTCB ); #define portCLEAN_UP_TCB( pxTCB ) vPortReclaimCSA( ( uint32_t * ) ( pxTCB ) ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/78K0R/portmacro.h b/portable/IAR/78K0R/portmacro.h index e6f67cdd3b7..4ccc083c080 100644 --- a/portable/IAR/78K0R/portmacro.h +++ b/portable/IAR/78K0R/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -140,8 +142,10 @@ static __interrupt void P0_isr (void); #define OCD_ENABLED 0x81 #define OCD_ENABLED_ERASE 0x80 +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CA5_No_GIC/portmacro.h b/portable/IAR/ARM_CA5_No_GIC/portmacro.h index 3d73f83fd16..b1dcf3878fb 100644 --- a/portable/IAR/ARM_CA5_No_GIC/portmacro.h +++ b/portable/IAR/ARM_CA5_No_GIC/portmacro.h @@ -29,14 +29,18 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ + /* IAR includes. */ #ifdef __ICCARM__ #include - #ifdef __cplusplus - extern "C" { - #endif + /*----------------------------------------------------------- * Port specific definitions. @@ -146,11 +150,6 @@ #define portNOP() __asm volatile( "NOP" ) - - #ifdef __cplusplus - } /* extern C */ - #endif - /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in the source code because to do so would cause other compilers to generate warnings. */ @@ -159,4 +158,10 @@ #endif /* __ICCARM__ */ +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CA9/portmacro.h b/portable/IAR/ARM_CA9/portmacro.h index 87c83eaa05c..bce3013b5b4 100644 --- a/portable/IAR/ARM_CA9/portmacro.h +++ b/portable/IAR/ARM_CA9/portmacro.h @@ -29,15 +29,17 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ + /* IAR includes. */ #ifdef __ICCARM__ #include - #ifdef __cplusplus - extern "C" { - #endif - /*----------------------------------------------------------- * Port specific definitions. * @@ -156,11 +158,6 @@ #define portNOP() __asm volatile( "NOP" ) - - #ifdef __cplusplus - } /* extern C */ - #endif - /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in the source code because to do so would cause other compilers to generate warnings. */ @@ -206,4 +203,10 @@ number of bits implemented by the interrupt controller. 
*/ #define portICCBPR_BINARY_POINT_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCBPR_BINARY_POINT_OFFSET ) ) ) #define portICCRPR_RUNNING_PRIORITY_REGISTER ( *( ( const volatile uint32_t * ) ( portINTERRUPT_CONTROLLER_CPU_INTERFACE_ADDRESS + portICCRPR_RUNNING_PRIORITY_OFFSET ) ) ) +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM23/non_secure/portmacro.h b/portable/IAR/ARM_CM23/non_secure/portmacro.h index d845ac1caa0..b4d11dfdbcf 100644 --- a/portable/IAR/ARM_CM23/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM23/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -71,8 +73,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h index d845ac1caa0..b4d11dfdbcf 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -71,8 +73,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. 
@@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM3/portmacro.h b/portable/IAR/ARM_CM3/portmacro.h index c334978ea73..92763ac7809 100644 --- a/portable/IAR/ARM_CM3/portmacro.h +++ b/portable/IAR/ARM_CM3/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -203,8 +205,10 @@ #pragma diag_suppress=Pe191 #pragma diag_suppress=Pa082 - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM33/non_secure/portmacro.h b/portable/IAR/ARM_CM33/non_secure/portmacro.h index b6df20eb88f..cbfa60241fa 100644 --- a/portable/IAR/ARM_CM33/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM33/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -71,8 +73,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. 
@@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h index b6df20eb88f..cbfa60241fa 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -71,8 +73,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM35P/non_secure/portmacro.h b/portable/IAR/ARM_CM35P/non_secure/portmacro.h index a0efc1f9dcf..70b73a31794 100644 --- a/portable/IAR/ARM_CM35P/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM35P/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -71,8 +73,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. 
@@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h index a0efc1f9dcf..70b73a31794 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -71,8 +73,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM4F/portmacro.h b/portable/IAR/ARM_CM4F/portmacro.h index 148c81d14ee..02f1d6ff038 100644 --- a/portable/IAR/ARM_CM4F/portmacro.h +++ b/portable/IAR/ARM_CM4F/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -202,8 +204,10 @@ #pragma diag_suppress=Pe191 #pragma diag_suppress=Pa082 - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM55/non_secure/portmacro.h b/portable/IAR/ARM_CM55/non_secure/portmacro.h index a3b510e282c..7783218fe0b 100644 --- a/portable/IAR/ARM_CM55/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM55/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -76,8 +78,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h index a3b510e282c..7783218fe0b 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -76,8 +78,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. 
@@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM7/r0p1/portmacro.h b/portable/IAR/ARM_CM7/r0p1/portmacro.h index 08867c289c3..a2a2d878868 100644 --- a/portable/IAR/ARM_CM7/r0p1/portmacro.h +++ b/portable/IAR/ARM_CM7/r0p1/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -205,8 +207,10 @@ #pragma diag_suppress=Pe191 #pragma diag_suppress=Pa082 - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM85/non_secure/portmacro.h b/portable/IAR/ARM_CM85/non_secure/portmacro.h index cfaae813eac..7aba706018e 100644 --- a/portable/IAR/ARM_CM85/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM85/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -76,8 +78,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. 
@@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h index cfaae813eac..7aba706018e 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ #include "portmacrocommon.h" @@ -76,8 +78,10 @@ #pragma diag_suppress=Pa082 /*-----------------------------------------------------------*/ +/* *INDENT-OFF* */ #ifdef __cplusplus } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h index ca7e9225c05..a6b5b984879 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -29,9 +29,11 @@ #ifndef PORTMACROCOMMON_H #define PORTMACROCOMMON_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*------------------------------------------------------------------------------ * Port specific definitions. @@ -306,8 +308,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACROCOMMON_H */ diff --git a/portable/IAR/ARM_CRx_No_GIC/portmacro.h b/portable/IAR/ARM_CRx_No_GIC/portmacro.h index fbe4e166a59..82853af3ccf 100644 --- a/portable/IAR/ARM_CRx_No_GIC/portmacro.h +++ b/portable/IAR/ARM_CRx_No_GIC/portmacro.h @@ -31,9 +31,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -173,9 +175,10 @@ warnings. */ #pragma diag_suppress=Pe191 #pragma diag_suppress=Pa082 +/* *INDENT-OFF* */ #ifdef __cplusplus - } /* extern C */ + } #endif - +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/ATMega323/portmacro.h b/portable/IAR/ATMega323/portmacro.h index 111f4d02667..69ba2f1d9ac 100644 --- a/portable/IAR/ATMega323/portmacro.h +++ b/portable/IAR/ATMega323/portmacro.h @@ -36,9 +36,11 @@ Changes from V1.2.3 #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -106,8 +108,10 @@ void vPortYield( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/AVR32_UC3/portmacro.h b/portable/IAR/AVR32_UC3/portmacro.h index eb48b59b589..73c206ca5f8 100644 --- a/portable/IAR/AVR32_UC3/portmacro.h +++ b/portable/IAR/AVR32_UC3/portmacro.h @@ -87,9 +87,11 @@ #include "intc.h" #include "compiler.h" +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Type definitions. */ @@ -679,8 +681,10 @@ extern void *pvPortRealloc( void *pv, size_t xSize ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/AtmelSAM7S64/portmacro.h b/portable/IAR/AtmelSAM7S64/portmacro.h index 173d3fbaa2d..01c6eed6aad 100644 --- a/portable/IAR/AtmelSAM7S64/portmacro.h +++ b/portable/IAR/AtmelSAM7S64/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -105,8 +107,10 @@ extern void vTaskSwitchContext( void ); \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/AtmelSAM9XE/portmacro.h b/portable/IAR/AtmelSAM9XE/portmacro.h index 3d1d82ec17b..db6f10cca85 100644 --- a/portable/IAR/AtmelSAM9XE/portmacro.h +++ b/portable/IAR/AtmelSAM9XE/portmacro.h @@ -32,9 +32,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -108,8 +110,10 @@ extern void vTaskSwitchContext( void ); \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/LPC2000/portmacro.h b/portable/IAR/LPC2000/portmacro.h index f41d8b3445e..8fdf1fe75a3 100644 --- a/portable/IAR/LPC2000/portmacro.h +++ b/portable/IAR/LPC2000/portmacro.h @@ -32,9 +32,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -107,8 +109,10 @@ extern void vTaskSwitchContext( void ); \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/RISC-V/portmacro.h b/portable/IAR/RISC-V/portmacro.h index 42def7367aa..5dddb454c79 100644 --- a/portable/IAR/RISC-V/portmacro.h +++ b/portable/IAR/RISC-V/portmacro.h @@ -32,9 +32,11 @@ #include "intrinsics.h" +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -176,8 +178,10 @@ extern size_t xCriticalNesting; #error configMTIME_BASE_ADDRESS and configMTIMECMP_BASE_ADDRESS must be defined in FreeRTOSConfig.h. Set them to zero if there is no MTIME (machine time) clock. See https://www.FreeRTOS.org/Using-FreeRTOS-on-RISC-V.html #endif +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/RL78/portmacro.h b/portable/IAR/RL78/portmacro.h index 9c74a3f0452..9685b569d5d 100644 --- a/portable/IAR/RL78/portmacro.h +++ b/portable/IAR/RL78/portmacro.h @@ -31,9 +31,11 @@ #ifdef __IAR_SYSTEMS_ICC__ +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -141,9 +143,11 @@ extern volatile uint16_t usCriticalNesting; \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} -#endif /* __cplusplus */ + } +#endif +/* *INDENT-ON* */ #endif /* __IAR_SYSTEMS_ICC__ */ diff --git a/portable/IAR/RX100/portmacro.h b/portable/IAR/RX100/portmacro.h index d408a1389a5..e53c43e4ce5 100644 --- a/portable/IAR/RX100/portmacro.h +++ b/portable/IAR/RX100/portmacro.h @@ -32,9 +32,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Hardware specifics. */ #include "machine.h" @@ -145,8 +147,10 @@ undefined - all warnings have been manually checked and are not an issue, and the warnings cannot be prevent by code changes without undesirable effects. */ #pragma diag_suppress=Pa082 +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/RX600/portmacro.h b/portable/IAR/RX600/portmacro.h index e5decdaf34a..1b1bec3fc51 100644 --- a/portable/IAR/RX600/portmacro.h +++ b/portable/IAR/RX600/portmacro.h @@ -32,9 +32,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -134,8 +136,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/RX700v3_DPFPU/portmacro.h b/portable/IAR/RX700v3_DPFPU/portmacro.h index e0204fa0c3c..f8fde27908e 100644 --- a/portable/IAR/RX700v3_DPFPU/portmacro.h +++ b/portable/IAR/RX700v3_DPFPU/portmacro.h @@ -33,9 +33,11 @@ /* Hardware specifics. */ #include - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -191,8 +193,10 @@ * the warnings cannot be prevent by code changes without undesirable effects. */ #pragma diag_suppress=Pa082 - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/RXv2/portmacro.h b/portable/IAR/RXv2/portmacro.h index 984a4fb50e2..3f84484487d 100644 --- a/portable/IAR/RXv2/portmacro.h +++ b/portable/IAR/RXv2/portmacro.h @@ -32,9 +32,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -139,8 +141,10 @@ undefined - all warnings have been manually checked and are not an issue, and the warnings cannot be prevent by code changes without undesirable effects. */ #pragma diag_suppress=Pa082 +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/STR71x/portmacro.h b/portable/IAR/STR71x/portmacro.h index 119eec84786..1fbadff3ebe 100644 --- a/portable/IAR/STR71x/portmacro.h +++ b/portable/IAR/STR71x/portmacro.h @@ -42,9 +42,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Type definitions. */ @@ -115,8 +117,10 @@ extern void vTaskSwitchContext( void ); \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/STR75x/portmacro.h b/portable/IAR/STR75x/portmacro.h index 674505d3f0c..94a7b72ed0b 100644 --- a/portable/IAR/STR75x/portmacro.h +++ b/portable/IAR/STR75x/portmacro.h @@ -42,9 +42,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Type definitions. */ #define portCHAR char @@ -106,8 +108,10 @@ extern void vTaskSwitchContext( void ); \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/STR91x/portmacro.h b/portable/IAR/STR91x/portmacro.h index 43ea6d7e8dc..b5429b57dbc 100644 --- a/portable/IAR/STR91x/portmacro.h +++ b/portable/IAR/STR91x/portmacro.h @@ -42,9 +42,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Type definitions. 
*/ #define portCHAR char @@ -108,8 +110,10 @@ extern void vTaskSwitchContext( void ); \ #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/IAR/V850ES/portmacro.h b/portable/IAR/V850ES/portmacro.h index 76b1186761d..cfae9ae7ca9 100644 --- a/portable/IAR/V850ES/portmacro.h +++ b/portable/IAR/V850ES/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -130,8 +132,10 @@ extern void vTaskSwitchContext( void ); #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/MPLAB/PIC24_dsPIC/portmacro.h b/portable/MPLAB/PIC24_dsPIC/portmacro.h index 83b695a43bb..0cc26f20633 100644 --- a/portable/MPLAB/PIC24_dsPIC/portmacro.h +++ b/portable/MPLAB/PIC24_dsPIC/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -108,8 +110,10 @@ extern void vPortYield( void ); #define portNOP() asm volatile ( "NOP" ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/MPLAB/PIC32MEC14xx/portmacro.h b/portable/MPLAB/PIC32MEC14xx/portmacro.h index d9aad5df0aa..9e00a1c05cc 100644 --- a/portable/MPLAB/PIC32MEC14xx/portmacro.h +++ b/portable/MPLAB/PIC32MEC14xx/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -244,8 +246,10 @@ extern volatile UBaseType_t uxInterruptNesting; #define portREMOVE_STATIC_QUALIFIER #endif +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/MPLAB/PIC32MX/portmacro.h b/portable/MPLAB/PIC32MX/portmacro.h index e2a1078e631..e0bf8dbd130 100644 --- a/portable/MPLAB/PIC32MX/portmacro.h +++ b/portable/MPLAB/PIC32MX/portmacro.h @@ -32,9 +32,11 @@ /* System include files */ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -196,8 +198,10 @@ extern volatile UBaseType_t uxInterruptNesting; #define portREMOVE_STATIC_QUALIFIER #endif +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/MPLAB/PIC32MZ/portmacro.h b/portable/MPLAB/PIC32MZ/portmacro.h index 17b266e1124..371bcab6f26 100644 --- a/portable/MPLAB/PIC32MZ/portmacro.h +++ b/portable/MPLAB/PIC32MZ/portmacro.h @@ -32,9 +32,11 @@ /* System include files */ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -207,8 +209,10 @@ extern volatile UBaseType_t uxInterruptNesting; #define portREMOVE_STATIC_QUALIFIER #endif +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/MikroC/ARM_CM4F/portmacro.h b/portable/MikroC/ARM_CM4F/portmacro.h index fa614c31488..b67a0a9d6a4 100644 --- a/portable/MikroC/ARM_CM4F/portmacro.h +++ b/portable/MikroC/ARM_CM4F/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -185,8 +187,10 @@ } /*-----------------------------------------------------------*/ - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Paradigm/Tern_EE/large_untested/portmacro.h b/portable/Paradigm/Tern_EE/large_untested/portmacro.h index ed513309c7c..292c9e260d6 100644 --- a/portable/Paradigm/Tern_EE/large_untested/portmacro.h +++ b/portable/Paradigm/Tern_EE/large_untested/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -100,9 +102,11 @@ typedef unsigned short UBaseType_t; #define portTASK_FUNCTION_PROTO( vTaskFunction, vParameters ) void vTaskFunction( void *pvParameters ) #define portTASK_FUNCTION( vTaskFunction, vParameters ) void vTaskFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Paradigm/Tern_EE/small/portmacro.h b/portable/Paradigm/Tern_EE/small/portmacro.h index 9eb5b863023..ff0b34b2065 100644 --- a/portable/Paradigm/Tern_EE/small/portmacro.h +++ b/portable/Paradigm/Tern_EE/small/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -102,8 +104,10 @@ typedef void ( __interrupt __far *pxISR )(); #define portTASK_FUNCTION_PROTO( vTaskFunction, vParameters ) void vTaskFunction( void *pvParameters ) #define portTASK_FUNCTION( vTaskFunction, vParameters ) void vTaskFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/RVDS/ARM_CA9/portmacro.h b/portable/RVDS/ARM_CA9/portmacro.h index 65dd8174f7c..1351dae9f60 100644 --- a/portable/RVDS/ARM_CA9/portmacro.h +++ b/portable/RVDS/ARM_CA9/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -158,8 +160,10 @@ void vPortTaskUsesFPU( void ); #define portNOP() __nop() +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Renesas/RX100/portmacro.h b/portable/Renesas/RX100/portmacro.h index 9bcbd3c8c3d..b82fdca2440 100644 --- a/portable/Renesas/RX100/portmacro.h +++ b/portable/Renesas/RX100/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Hardware specifics. */ #include "machine.h" @@ -146,8 +148,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Renesas/RX200/portmacro.h b/portable/Renesas/RX200/portmacro.h index 62a085023e2..55278dacb15 100644 --- a/portable/Renesas/RX200/portmacro.h +++ b/portable/Renesas/RX200/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Hardware specifics. */ #include "machine.h" @@ -136,8 +138,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Renesas/RX600/portmacro.h b/portable/Renesas/RX600/portmacro.h index 3b29cbddb50..60106ae13e7 100644 --- a/portable/Renesas/RX600/portmacro.h +++ b/portable/Renesas/RX600/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Hardware specifics. */ #include "machine.h" @@ -137,8 +139,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Renesas/RX600v2/portmacro.h b/portable/Renesas/RX600v2/portmacro.h index d67a8f892aa..6efba647dbd 100644 --- a/portable/Renesas/RX600v2/portmacro.h +++ b/portable/Renesas/RX600v2/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* Hardware specifics. 
*/ #include "machine.h" @@ -137,8 +139,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Renesas/RX700v3_DPFPU/portmacro.h b/portable/Renesas/RX700v3_DPFPU/portmacro.h index 12657ee82bf..fef13556e0d 100644 --- a/portable/Renesas/RX700v3_DPFPU/portmacro.h +++ b/portable/Renesas/RX700v3_DPFPU/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /* Hardware specifics. */ #include @@ -181,8 +183,10 @@ /* Definition to allow compatibility with existing FreeRTOS Demo using flop.c. */ #define portTASK_USES_FLOATING_POINT() vPortTaskUsesDPFPU() - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Renesas/SH2A_FPU/portmacro.h b/portable/Renesas/SH2A_FPU/portmacro.h index 422cd59ed48..0ff2e323afa 100644 --- a/portable/Renesas/SH2A_FPU/portmacro.h +++ b/portable/Renesas/SH2A_FPU/portmacro.h @@ -32,9 +32,11 @@ #include +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -132,8 +134,10 @@ extern void vTaskExitCritical( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/Tasking/ARM_CM4F/portmacro.h b/portable/Tasking/ARM_CM4F/portmacro.h index 3371f34f47a..1c3588b8630 100644 --- a/portable/Tasking/ARM_CM4F/portmacro.h +++ b/portable/Tasking/ARM_CM4F/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -128,8 +130,10 @@ #define portNOP() - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h b/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h index b82e48695d9..2c6979faf53 100644 --- a/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h +++ b/portable/ThirdParty/CDK/T-HEAD_CK802/portmacro.h @@ -30,10 +30,13 @@ #include extern void vPortYield(void); + +/* *INDENT-OFF* */ #ifdef __cplusplus -class vPortYield; -extern "C" { + class vPortYield; + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- @@ -154,8 +157,10 @@ extern portLONG pendsvflag; +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ThirdParty/GCC/ARC_v1/portmacro.h b/portable/ThirdParty/GCC/ARC_v1/portmacro.h index 72fbb49759e..137cbc05d93 100644 --- a/portable/ThirdParty/GCC/ARC_v1/portmacro.h +++ b/portable/ThirdParty/GCC/ARC_v1/portmacro.h @@ -30,9 +30,11 @@ #define PORTMACRO_H #include "embARC.h" - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ /* record stack high address for stack check */ #ifndef configRECORD_STACK_HIGH_ADDRESS @@ -143,8 +145,10 @@ void vPortYield( void ); void vPortYieldFromIsr( void ); - #ifdef __cplusplus -} - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ThirdParty/GCC/ATmega/portmacro.h b/portable/ThirdParty/GCC/ATmega/portmacro.h index 3e7714eb836..8292f2d7ed9 100644 --- a/portable/ThirdParty/GCC/ATmega/portmacro.h +++ b/portable/ThirdParty/GCC/ATmega/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -152,8 +154,10 @@ extern void vPortYieldFromISR( void ) __attribute__ ( ( naked ) ); #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ThirdParty/GCC/Posix/portmacro.h b/portable/ThirdParty/GCC/Posix/portmacro.h index 68655861202..a3ab7d32393 100644 --- a/portable/ThirdParty/GCC/Posix/portmacro.h +++ b/portable/ThirdParty/GCC/Posix/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ #include @@ -130,8 +132,10 @@ extern unsigned long ulPortGetRunTime( void ); #define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() /* no-op */ #define portGET_RUN_TIME_COUNTER_VALUE() ulPortGetRunTime() +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h index 1043d2c5587..15f71811ea8 100644 --- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h +++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ #include "pico.h" /*----------------------------------------------------------- @@ -147,8 +149,10 @@ #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h b/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h index eb731232f4f..2406bd921f7 100644 --- a/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h +++ b/portable/ThirdParty/GCC/RP2040/include/rp2040_config.h @@ -29,9 +29,11 @@ #ifndef RP2040_CONFIG_H #define RP2040_CONFIG_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /* configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1 means set the exception handlers dynamically on cores * that need them in case the user has set up distinct vector table offsets per core @@ -63,8 +65,10 @@ extern "C" { #endif #endif +/* *INDENT-OFF* */ #ifdef __cplusplus -}; + } #endif +/* *INDENT-ON* */ #endif diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/port_systick.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/port_systick.h index 18b47f3154e..738b5a18001 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/include/port_systick.h +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/port_systick.h @@ -6,15 +6,19 @@ #pragma once +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /** * @brief Set up the SysTick interrupt */ void vPortSetupTimer(void); +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_config.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_config.h index cb20b188504..78ef67da888 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_config.h +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/xtensa_config.h @@ -42,9 +42,11 @@ #ifndef XTENSA_CONFIG_H #define XTENSA_CONFIG_H - #ifdef __cplusplus - extern "C" { - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ #include 
#include @@ -146,9 +148,10 @@ #define XT_STACK_EXTRA ( XT_XTRA_SIZE ) #define XT_STACK_EXTRA_CLIB ( XT_XTRA_SIZE + XT_CLIB_CONTEXT_AREA_SIZE ) - - #ifdef __cplusplus - } - #endif +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ #endif /* XTENSA_CONFIG_H */ diff --git a/portable/ThirdParty/XCC/Xtensa/portmacro.h b/portable/ThirdParty/XCC/Xtensa/portmacro.h index c81576c9f78..f84e4335e1e 100644 --- a/portable/ThirdParty/XCC/Xtensa/portmacro.h +++ b/portable/ThirdParty/XCC/Xtensa/portmacro.h @@ -30,9 +30,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ #ifndef __ASSEMBLER__ @@ -204,8 +206,10 @@ static inline void vPortCleanUpTcbClib(struct _reent *ptr) #endif // __ASSEMBLER__ +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/ThirdParty/XCC/Xtensa/xtensa_config.h b/portable/ThirdParty/XCC/Xtensa/xtensa_config.h index 18f3e82c555..a5efcda5898 100644 --- a/portable/ThirdParty/XCC/Xtensa/xtensa_config.h +++ b/portable/ThirdParty/XCC/Xtensa/xtensa_config.h @@ -39,9 +39,11 @@ #ifndef XTENSA_CONFIG_H #define XTENSA_CONFIG_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ #include #include @@ -180,8 +182,10 @@ extern "C" { #define XT_STACK_EXTRA_CLIB (XT_XTRA_SIZE + XT_CLIB_CONTEXT_AREA_SIZE) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* XTENSA_CONFIG_H */ diff --git a/portable/oWatcom/16BitDOS/Flsh186/portmacro.h b/portable/oWatcom/16BitDOS/Flsh186/portmacro.h index a2b4c16a40b..952e3f68021 100644 --- a/portable/oWatcom/16BitDOS/Flsh186/portmacro.h +++ b/portable/oWatcom/16BitDOS/Flsh186/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. @@ -105,8 +107,10 @@ void portENABLE_INTERRUPTS( void ); #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ diff --git a/portable/oWatcom/16BitDOS/PC/portmacro.h b/portable/oWatcom/16BitDOS/PC/portmacro.h index ab4eea87210..2fb7534117a 100644 --- a/portable/oWatcom/16BitDOS/PC/portmacro.h +++ b/portable/oWatcom/16BitDOS/PC/portmacro.h @@ -29,9 +29,11 @@ #ifndef PORTMACRO_H #define PORTMACRO_H +/* *INDENT-OFF* */ #ifdef __cplusplus -extern "C" { + extern "C" { #endif +/* *INDENT-ON* */ /*----------------------------------------------------------- * Port specific definitions. 
@@ -106,9 +108,11 @@ void portENABLE_INTERRUPTS( void ); #define portTASK_FUNCTION_PROTO( vTaskFunction, pvParameters ) void vTaskFunction( void *pvParameters ) #define portTASK_FUNCTION( vTaskFunction, pvParameters ) void vTaskFunction( void *pvParameters ) +/* *INDENT-OFF* */ #ifdef __cplusplus -} + } #endif +/* *INDENT-ON* */ #endif /* PORTMACRO_H */ From eb00a535a1bcfb4bb4570ff018fd3aa3e6ddade1 Mon Sep 17 00:00:00 2001 From: tcpluess Date: Fri, 28 Apr 2023 23:05:56 +0200 Subject: [PATCH 36/62] Paranthesize expression-like macro (#668) --- include/newlib-freertos.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/newlib-freertos.h b/include/newlib-freertos.h index 497ca529990..a65e62e8ef7 100644 --- a/include/newlib-freertos.h +++ b/include/newlib-freertos.h @@ -52,7 +52,7 @@ #endif #ifndef configSET_TLS_BLOCK - #define configSET_TLS_BLOCK( xTLSBlock ) _impure_ptr = &( xTLSBlock ) + #define configSET_TLS_BLOCK( xTLSBlock ) ( _impure_ptr = &( xTLSBlock ) ) #endif #ifndef configDEINIT_TLS_BLOCK From 4550780f0c686cb60c563d0f99f5d8079d864a71 Mon Sep 17 00:00:00 2001 From: Sudeep Mohanty <91244425+sudeep-mohanty@users.noreply.github.com> Date: Mon, 8 May 2023 08:57:28 +0200 Subject: [PATCH 37/62] Updated tasks.c checks for scheduler suspension (#670) This commit updates the checks for the variable uxSchedulerSuspended in tasks.c module to use a uniform format. Signed-off-by: Sudeep Mohanty --- tasks.c | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/tasks.c b/tasks.c index befd63e2279..59f96226bfc 100644 --- a/tasks.c +++ b/tasks.c @@ -387,7 +387,7 @@ const volatile UBaseType_t uxTopUsedPriority = configMAX_PRIORITIES - 1U; * kernel to move the task from the pending ready list into the real ready list * when the scheduler is unsuspended. The pending ready list itself can only be * accessed from a critical section. */ -PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE; +PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) 0U; #if ( configGENERATE_RUN_TIME_STATS == 1 ) @@ -1200,7 +1200,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { if( pxTCB == pxCurrentTCB ) { - configASSERT( uxSchedulerSuspended == 0 ); + configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U ); portYIELD_WITHIN_API(); } else @@ -1223,7 +1223,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) configASSERT( pxPreviousWakeTime ); configASSERT( ( xTimeIncrement > 0U ) ); - configASSERT( uxSchedulerSuspended == 0 ); + configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U ); vTaskSuspendAll(); { @@ -1309,7 +1309,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) /* A delay time of zero just forces a reschedule. */ if( xTicksToDelay > ( TickType_t ) 0U ) { - configASSERT( uxSchedulerSuspended == 0 ); + configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U ); vTaskSuspendAll(); { traceTASK_DELAY(); @@ -1748,7 +1748,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) if( xSchedulerRunning != pdFALSE ) { /* The current task has just been suspended. */ - configASSERT( uxSchedulerSuspended == 0 ); + configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U ); portYIELD_WITHIN_API(); } else @@ -1914,7 +1914,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) traceTASK_RESUME_FROM_ISR( pxTCB ); /* Check the ready lists can be accessed. 
*/ - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { /* Ready lists can be accessed so move the task from the * suspended list to the ready list directly. */ @@ -2183,7 +2183,7 @@ BaseType_t xTaskResumeAll( void ) /* If uxSchedulerSuspended is zero then this function does not match a * previous call to vTaskSuspendAll(). */ - configASSERT( uxSchedulerSuspended ); + configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); /* It is possible that an ISR caused a task to be removed from an event * list while the scheduler was suspended. If this was the case then the @@ -2194,7 +2194,7 @@ BaseType_t xTaskResumeAll( void ) { --uxSchedulerSuspended; - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U ) { @@ -2642,7 +2642,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char /* Arrange for xTickCount to reach xNextTaskUnblockTime in * xTaskIncrementTick() when the scheduler resumes. This ensures * that any delayed tasks are resumed at the correct time. */ - configASSERT( uxSchedulerSuspended ); + configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); configASSERT( xTicksToJump != ( TickType_t ) 0 ); /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */ @@ -2671,7 +2671,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) /* Must not be called with the scheduler suspended as the implementation * relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */ - configASSERT( uxSchedulerSuspended == 0 ); + configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U ); /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */ @@ -2780,7 +2780,7 @@ BaseType_t xTaskIncrementTick( void ) * tasks to be unblocked. */ traceTASK_INCREMENT_TICK( xTickCount ); - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { /* Minor optimisation. The tick count cannot change in this * block. */ @@ -3060,7 +3060,7 @@ BaseType_t xTaskIncrementTick( void ) void vTaskSwitchContext( void ) { - if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended != ( UBaseType_t ) 0U ) { /* The scheduler is currently suspended - do not allow a context * switch. */ @@ -3165,7 +3165,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by * the event groups implementation. */ - configASSERT( uxSchedulerSuspended != 0 ); + configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); /* Store the item value in the event list item. It is safe to access the * event list item here as interrupts won't access the event list item of a @@ -3240,7 +3240,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) configASSERT( pxUnblockedTCB ); listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) ); - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) ); prvAddTaskToReadyList( pxUnblockedTCB ); @@ -3293,7 +3293,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by * the event flags implementation. 
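A note on the comparisons this patch normalises: `uxSchedulerSuspended` is a nesting count rather than a flag — `vTaskSuspendAll()` increments it and `xTaskResumeAll()` performs the `--uxSchedulerSuspended` seen above — so testing against `( UBaseType_t ) 0U` states the intent more precisely than comparing with `pdFALSE`. A task-level usage sketch of the nesting behaviour, illustrative only:

#include "FreeRTOS.h"
#include "task.h"

void vDoWorkWithSchedulerSuspended( void )
{
    vTaskSuspendAll();                  /* uxSchedulerSuspended: 0 -> 1 */
    {
        vTaskSuspendAll();              /* 1 -> 2, suspension nests safely */
        {
            /* Other tasks cannot preempt here; interrupts stay enabled. */
        }
        ( void ) xTaskResumeAll();      /* 2 -> 1, scheduler still suspended */
    }

    /* Only the outermost call resumes scheduling; its return value reports
     * whether a context switch was performed while unwinding. */
    ( void ) xTaskResumeAll();          /* 1 -> 0 */
}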
*/ - configASSERT( uxSchedulerSuspended != pdFALSE ); + configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U ); /* Store the new item value in the event list. */ listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); @@ -4093,7 +4093,7 @@ static void prvResetNextTaskUnblockTime( void ) } else { - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { xReturn = taskSCHEDULER_RUNNING; } @@ -5113,7 +5113,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* The task should not have been on an event list. */ configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); @@ -5204,7 +5204,7 @@ TickType_t uxTaskResetEventItemValue( void ) /* The task should not have been on an event list. */ configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); - if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + if( uxSchedulerSuspended == ( UBaseType_t ) 0U ) { listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); prvAddTaskToReadyList( pxTCB ); From 153e52b7295fdc32c3f609a51d35fc5ba7f35698 Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Mon, 8 May 2023 23:01:26 +0530 Subject: [PATCH 38/62] Fix cast alignment warning (#669) * Fix cast alignment warning Without this change, the code produces the following warning when compiled with `-Wcast-align` flag: ``` cast increases required alignment of target type ``` Signed-off-by: Gaurav Aggarwal --- .github/lexicon.txt | 1 + stream_buffer.c | 16 ++++++++-------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/.github/lexicon.txt b/.github/lexicon.txt index e7ded970dbd..a4c02373eb8 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -1606,6 +1606,7 @@ putchar puxstackbuffer puxvariabletoincrement pv +pvallocatedmemory pvbuffer pvcallbackref pvcomparand diff --git a/stream_buffer.c b/stream_buffer.c index 938c4051cec..7dbebbd624c 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -324,7 +324,7 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, StreamBufferCallbackFunction_t pxSendCompletedCallback, StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) { - uint8_t * pucAllocatedMemory; + void * pvAllocatedMemory; uint8_t ucFlags; /* In case the stream buffer is going to be used as a message buffer @@ -364,31 +364,31 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, if( xBufferSizeBytes < ( xBufferSizeBytes + 1 + sizeof( StreamBuffer_t ) ) ) { xBufferSizeBytes++; - pucAllocatedMemory = ( uint8_t * ) pvPortMalloc( xBufferSizeBytes + sizeof( StreamBuffer_t ) ); /*lint !e9079 malloc() only returns void*. */ + pvAllocatedMemory = pvPortMalloc( xBufferSizeBytes + sizeof( StreamBuffer_t ) ); } else { - pucAllocatedMemory = NULL; + pvAllocatedMemory = NULL; } - if( pucAllocatedMemory != NULL ) + if( pvAllocatedMemory != NULL ) { - prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pucAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */ - pucAllocatedMemory + sizeof( StreamBuffer_t ), /* Storage area follows. 
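For context on the diagnostic this patch silences: `-Wcast-align` flags any pointer cast that increases the alignment requirement of the pointed-to type, which is exactly what the old `uint8_t *` to `StreamBuffer_t *` cast did, whereas the cast from the `void *` returned by `pvPortMalloc()` is not flagged (the allocator already has to return memory aligned for any object). A standalone reproduction using an invented control-block type rather than the real kernel structures:

#include <stdint.h>
#include <stdlib.h>

typedef struct xExampleCtrl
{
    uint32_t ulHead;
    uint32_t ulTail;
} ExampleCtrl_t;

void vExample( void )
{
    /* Warns under -Wcast-align on strict-alignment targets such as most Arm
     * cores: the cast raises the required alignment from 1 to that of
     * ExampleCtrl_t. */
    uint8_t * pucRaw = ( uint8_t * ) malloc( sizeof( ExampleCtrl_t ) + 64U );
    /* ExampleCtrl_t * pxWarns = ( ExampleCtrl_t * ) pucRaw; */

    /* Keeping the allocation as void * - mirroring pvAllocatedMemory in the
     * patch - avoids the warning; byte arithmetic is done through a
     * temporary uint8_t * instead. */
    void * pvRaw = malloc( sizeof( ExampleCtrl_t ) + 64U );
    ExampleCtrl_t * pxCtrl = ( ExampleCtrl_t * ) pvRaw;
    uint8_t * pucStorage = ( ( uint8_t * ) pvRaw ) + sizeof( ExampleCtrl_t );

    ( void ) pxCtrl;
    ( void ) pucStorage;
    free( pvRaw );
    free( pucRaw );
}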
*/ /*lint !e9016 Indexing past structure valid for uint8_t pointer, also storage area has no alignment requirement. */ + prvInitialiseNewStreamBuffer( ( StreamBuffer_t * ) pvAllocatedMemory, /* Structure at the start of the allocated memory. */ /*lint !e9087 Safe cast as allocated memory is aligned. */ /*lint !e826 Area is not too small and alignment is guaranteed provided malloc() behaves as expected and returns aligned buffer. */ + ( ( uint8_t * ) pvAllocatedMemory ) + sizeof( StreamBuffer_t ), /* Storage area follows. */ /*lint !e9016 Indexing past structure valid for uint8_t pointer, also storage area has no alignment requirement. */ xBufferSizeBytes, xTriggerLevelBytes, ucFlags, pxSendCompletedCallback, pxReceiveCompletedCallback ); - traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pucAllocatedMemory ), xIsMessageBuffer ); + traceSTREAM_BUFFER_CREATE( ( ( StreamBuffer_t * ) pvAllocatedMemory ), xIsMessageBuffer ); } else { traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ); } - return ( StreamBufferHandle_t ) pucAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */ + return ( StreamBufferHandle_t ) pvAllocatedMemory; /*lint !e9087 !e826 Safe cast as allocated memory is aligned. */ } #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ /*-----------------------------------------------------------*/ From 9149af9a6f647851bafecec75d27e39056b115f9 Mon Sep 17 00:00:00 2001 From: Monika Singh <108652024+moninom1@users.noreply.github.com> Date: Wed, 10 May 2023 12:11:52 +0530 Subject: [PATCH 39/62] Align StackSize and StackAddress for macOS (#674) --- portable/ThirdParty/GCC/Posix/port.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/portable/ThirdParty/GCC/Posix/port.c b/portable/ThirdParty/GCC/Posix/port.c index d634c8b264e..5ac570d9a59 100644 --- a/portable/ThirdParty/GCC/Posix/port.c +++ b/portable/ThirdParty/GCC/Posix/port.c @@ -61,6 +61,10 @@ #include #include +#ifdef __APPLE__ + #include +#endif + /* Scheduler includes. */ #include "FreeRTOS.h" #include "task.h" @@ -146,6 +150,11 @@ portSTACK_TYPE * pxPortInitialiseStack( StackType_t * pxTopOfStack, pxTopOfStack = ( portSTACK_TYPE * ) thread - 1; ulStackSize = ( size_t )( pxTopOfStack + 1 - pxEndOfStack ) * sizeof( *pxTopOfStack ); + #ifdef __APPLE__ + pxEndOfStack = mach_vm_round_page ( pxEndOfStack ); + ulStackSize = mach_vm_trunc_page ( ulStackSize ); + #endif + thread->pxCode = pxCode; thread->pvParams = pvParameters; thread->xDying = pdFALSE; From a07f649bd50798cf7bd20325f12491c599a8bd39 Mon Sep 17 00:00:00 2001 From: Devaraj Ranganna Date: Thu, 11 May 2023 17:41:00 +0100 Subject: [PATCH 40/62] Armv8-M (except Cortex-M23) interrupt priority checking (#673) * Armv8-M: Formatting changes Signed-off-by: Devaraj Ranganna * Armv8-M: Add support for interrupt priority check FreeRTOS provides `FromISR` system calls which can be called directly from interrupt service routines. It is crucial that the priority of these ISRs is set to same or lower value (numerically higher) than that of `configMAX_SYSCALL_INTERRUPT_PRIORITY`. For more information refer to https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html. Add a check to trigger an assert when an ISR with priority higher (numerically lower) than `configMAX_SYSCALL_INTERRUPT_PRIORITY` calls `FromISR` system calls if `configASSERT` macro is defined. In addition, add a config option `configQEMU_DISABLE_INTERRUPT_PRIO_BITS_CHECK` to disable interrupt priority check while running on QEMU. 
Based on the discussion https://gitlab.com/qemu-project/qemu/-/issues/1122, The interrupt priority bits in QEMU do not match the real hardware. Therefore the assert that checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. The config option `configQEMU_DISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the `FreeRTOSConfig.h` for QEMU targets. Signed-off-by: Devaraj Ranganna * Use SHPR2 for calculating interrupt priority bits This removes the dependency on the secure software to mark the interrupt as non-secure. Signed-off-by: Gaurav Aggarwal --------- Signed-off-by: Devaraj Ranganna Signed-off-by: Gaurav Aggarwal Co-authored-by: Gaurav Aggarwal Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --- portable/ARMv8M/non_secure/port.c | 196 +++++++++++++ .../portable/GCC/ARM_CM23/portmacro.h | 19 +- .../portable/GCC/ARM_CM23_NTZ/portmacro.h | 19 +- .../portable/GCC/ARM_CM33/portmacro.h | 7 +- .../portable/GCC/ARM_CM33_NTZ/portmacro.h | 7 +- .../portable/GCC/ARM_CM35P/portmacro.h | 7 +- .../portable/GCC/ARM_CM55/portmacro.h | 7 +- .../portable/GCC/ARM_CM85/portmacro.h | 7 +- .../portable/IAR/ARM_CM23/portmacro.h | 17 +- .../portable/IAR/ARM_CM23_NTZ/portmacro.h | 17 +- .../portable/IAR/ARM_CM33/portmacro.h | 7 +- .../portable/IAR/ARM_CM33_NTZ/portmacro.h | 7 +- .../portable/IAR/ARM_CM35P/portmacro.h | 7 +- .../portable/IAR/ARM_CM55/portmacro.h | 7 +- .../portable/IAR/ARM_CM85/portmacro.h | 7 +- portable/ARMv8M/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/GCC/ARM_CM23/non_secure/port.c | 196 +++++++++++++ portable/GCC/ARM_CM23/non_secure/portmacro.h | 19 +- .../GCC/ARM_CM23/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/GCC/ARM_CM23_NTZ/non_secure/port.c | 196 +++++++++++++ .../GCC/ARM_CM23_NTZ/non_secure/portmacro.h | 19 +- .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/GCC/ARM_CM33/non_secure/port.c | 196 +++++++++++++ portable/GCC/ARM_CM33/non_secure/portmacro.h | 7 +- .../GCC/ARM_CM33/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/GCC/ARM_CM33_NTZ/non_secure/port.c | 196 +++++++++++++ .../GCC/ARM_CM33_NTZ/non_secure/portmacro.h | 7 +- .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/GCC/ARM_CM35P/non_secure/port.c | 196 +++++++++++++ portable/GCC/ARM_CM35P/non_secure/portmacro.h | 7 +- .../ARM_CM35P/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/GCC/ARM_CM35P_NTZ/non_secure/port.c | 196 +++++++++++++ .../GCC/ARM_CM35P_NTZ/non_secure/portmacro.h | 7 +- .../non_secure/portmacrocommon.h | 269 +++++++++--------- portable/GCC/ARM_CM55/non_secure/port.c | 196 +++++++++++++ portable/GCC/ARM_CM55/non_secure/portmacro.h | 7 +- .../GCC/ARM_CM55/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/GCC/ARM_CM55_NTZ/non_secure/port.c | 196 +++++++++++++ .../GCC/ARM_CM55_NTZ/non_secure/portmacro.h | 7 +- .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/GCC/ARM_CM85/non_secure/port.c | 196 +++++++++++++ portable/GCC/ARM_CM85/non_secure/portmacro.h | 7 +- .../GCC/ARM_CM85/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/GCC/ARM_CM85_NTZ/non_secure/port.c | 196 +++++++++++++ .../GCC/ARM_CM85_NTZ/non_secure/portmacro.h | 7 +- .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/IAR/ARM_CM23/non_secure/port.c | 196 +++++++++++++ portable/IAR/ARM_CM23/non_secure/portmacro.h | 17 +- .../IAR/ARM_CM23/non_secure/portmacrocommon.h | 269 
+++++++++--------- portable/IAR/ARM_CM23_NTZ/non_secure/port.c | 196 +++++++++++++ .../IAR/ARM_CM23_NTZ/non_secure/portmacro.h | 17 +- .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/IAR/ARM_CM33/non_secure/port.c | 196 +++++++++++++ portable/IAR/ARM_CM33/non_secure/portmacro.h | 7 +- .../IAR/ARM_CM33/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/IAR/ARM_CM33_NTZ/non_secure/port.c | 196 +++++++++++++ .../IAR/ARM_CM33_NTZ/non_secure/portmacro.h | 7 +- .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/IAR/ARM_CM35P/non_secure/port.c | 196 +++++++++++++ portable/IAR/ARM_CM35P/non_secure/portmacro.h | 7 +- .../ARM_CM35P/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/IAR/ARM_CM35P_NTZ/non_secure/port.c | 196 +++++++++++++ .../IAR/ARM_CM35P_NTZ/non_secure/portmacro.h | 7 +- .../non_secure/portmacrocommon.h | 269 +++++++++--------- portable/IAR/ARM_CM55/non_secure/port.c | 196 +++++++++++++ portable/IAR/ARM_CM55/non_secure/portmacro.h | 7 +- .../IAR/ARM_CM55/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/IAR/ARM_CM55_NTZ/non_secure/port.c | 196 +++++++++++++ .../IAR/ARM_CM55_NTZ/non_secure/portmacro.h | 7 +- .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/IAR/ARM_CM85/non_secure/port.c | 196 +++++++++++++ portable/IAR/ARM_CM85/non_secure/portmacro.h | 7 +- .../IAR/ARM_CM85/non_secure/portmacrocommon.h | 269 +++++++++--------- portable/IAR/ARM_CM85_NTZ/non_secure/port.c | 196 +++++++++++++ .../IAR/ARM_CM85_NTZ/non_secure/portmacro.h | 7 +- .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 269 +++++++++--------- 76 files changed, 7291 insertions(+), 2800 deletions(-) diff --git a/portable/ARMv8M/non_secure/port.c b/portable/ARMv8M/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/ARMv8M/non_secure/port.c +++ b/portable/ARMv8M/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. 
+ */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. 
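A worked example of the probe and counting loop above, under the assumption of a device that implements 3 priority bits: writing 0xFF to a system handler priority byte reads back as 0xE0, so the loop counts three set bits. The stand-alone sketch below mirrors that logic so the arithmetic can be checked on a host machine; the function name is invented for the illustration.

#include <stdint.h>
#include <stdio.h>

/* Counts how many of the most significant bits of a priority byte stuck after
 * writing 0xFF, mirroring the shift-and-test loop in xPortStartScheduler(). */
static uint32_t prvCountImplementedPrioBits( uint8_t ucReadBack )
{
    uint32_t ulBits = 0;

    while( ( ucReadBack & 0x80U ) == 0x80U )
    {
        ulBits++;
        ucReadBack = ( uint8_t ) ( ucReadBack << 1 );
    }

    return ulBits;
}

int main( void )
{
    /* A 3-bit NVIC returns 0xE0; 7 - 3 = 4 is then the matching PRIGROUP value. */
    printf( "Implemented priority bits: %u\n",
            ( unsigned ) prvCountImplementedPrioBits( 0xE0U ) );
    return 0;
}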
+ * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
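For application writers, the sketch below shows one way to install an interrupt so that both configASSERT() checks in vPortValidateInterruptPriority() pass. It assumes a CMSIS device header (NVIC_SetPriorityGrouping, NVIC_SetPriority, NVIC_EnableIRQ) and uses UARTx_IRQn and the priority value 6 purely as placeholders; with the 3-bit, max-syscall-priority-5 configuration sketched earlier, 6 is numerically at or above the syscall limit.

/* Illustrative only; relies on the CMSIS device header for the NVIC calls. */
void vSetupUartInterrupt( void )
{
    /* All priority bits become pre-emption bits, satisfying the PRIGROUP assert. */
    NVIC_SetPriorityGrouping( 0 );

    /* Numerically at or above configMAX_SYSCALL_INTERRUPT_PRIORITY, so the
     * handler may safely call FromISR API functions. */
    NVIC_SetPriority( UARTx_IRQn, 6 );

    NVIC_EnableIRQ( UARTx_IRQn );
}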
+ * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h index 6a2ed748524..746f734b8ac 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -50,12 +48,17 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif /*-----------------------------------------------------------*/ @@ -63,8 +66,8 @@ /** * @brief Critical section management. 
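The portmacro.h hunks above all follow the same pattern: portHAS_BASEPRI is defined before portmacrocommon.h is included, because the common header now keys the priority-validation hook on it. The skeleton below is a reduced sketch of that ordering, not a complete header; the ellipsis comment stands for the material elided here.

#ifndef PORTMACRO_H
#define PORTMACRO_H

/* Architecture specifics must come first... */
#define portARCH_NAME       "Cortex-M33"
#define portHAS_BASEPRI     1

/* ...so that the common header sees portHAS_BASEPRI when it decides whether to
 * map portASSERT_IF_INTERRUPT_PRIORITY_INVALID() to vPortValidateInterruptPriority(). */
#include "portmacrocommon.h"

/* ... remaining port specific definitions ... */

#endif /* PORTMACRO_H */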
*/ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h index 6a2ed748524..746f734b8ac 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -50,12 +48,17 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif /*-----------------------------------------------------------*/ @@ -63,8 +66,8 @@ /** * @brief Critical section management. */ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h index 27da496744d..19da9b0ecfe 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -51,10 +49,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
*/ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h index 27da496744d..19da9b0ecfe 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -51,10 +49,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h index dfc8365305e..cc643459770 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -51,10 +49,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M35P" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h index 3cde19a73d5..c9bad40cf98 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -56,10 +54,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M55" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h index 0fd11fcdf3a..c45dd21c29e 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -56,10 +54,15 @@ * Architecture specifics. 
*/ #define portARCH_NAME "Cortex-M85" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h index b4d11dfdbcf..9cf0e87fbc8 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -50,11 +48,16 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __root +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif /*-----------------------------------------------------------*/ @@ -62,8 +65,8 @@ /** * @brief Critical section management. */ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h index b4d11dfdbcf..9cf0e87fbc8 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -50,11 +48,16 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __root +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif /*-----------------------------------------------------------*/ @@ -62,8 +65,8 @@ /** * @brief Critical section management. 
*/ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h index cbfa60241fa..380768fc03b 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -51,6 +49,7 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ @@ -59,6 +58,10 @@ #endif /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. */ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h index cbfa60241fa..815dca0861a 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -51,9 +49,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h index 70b73a31794..46bc4e24b56 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM35P/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -51,9 +49,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M35P" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. 
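The reason portHAS_BASEPRI is 0 on Cortex-M23 and 1 on the Armv8-M Mainline cores above is that only the latter implement the BASEPRI register. The fragment below is a conceptual sketch of the two masking models, not the port's actual critical-section code; configMAX_SYSCALL_INTERRUPT_PRIORITY is assumed to come from FreeRTOSConfig.h.

#include <stdint.h>

/* Cortex-M23 (portHAS_BASEPRI == 0): PRIMASK masks every interrupt, so there is
 * no syscall-priority boundary to validate. */
static inline void vMaskAllInterrupts( void )
{
    __asm volatile ( "cpsid i" ::: "memory" );
}

/* Cortex-M33/M35P/M55/M85 (portHAS_BASEPRI == 1): BASEPRI masks only interrupts
 * whose priority value is numerically at or above the value written, so
 * interrupts above configMAX_SYSCALL_INTERRUPT_PRIORITY keep running and must
 * never call FreeRTOS API functions; that is what the new assert enforces. */
static inline void vMaskSyscallInterrupts( void )
{
    uint32_t ulNewBasepri = configMAX_SYSCALL_INTERRUPT_PRIORITY;

    __asm volatile ( "msr basepri, %0" : : "r" ( ulNewBasepri ) : "memory" );
}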
#endif diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h index 7783218fe0b..7829ee6186a 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM55/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -56,9 +54,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M55" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h index 7aba706018e..3b51cb5ff46 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM85/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -56,9 +54,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M85" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif diff --git a/portable/ARMv8M/non_secure/portmacrocommon.h b/portable/ARMv8M/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/ARMv8M/non_secure/portmacrocommon.h +++ b/portable/ARMv8M/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. 
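A minimal FreeRTOSConfig.h excerpt that satisfies the three compile-time checks above might look as follows; the particular 1/0 values are only an example configuration, not a recommendation.

#define configENABLE_FPU          1
#define configENABLE_MPU          0
#define configENABLE_TRUSTZONE    0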
+#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
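As a quick worked example of the tick definitions above, assuming configTICK_RATE_HZ is 1000 and the 32-bit tick type is selected: portTICK_PERIOD_MS evaluates to 1, so a 500 ms delay is 500 ticks. The sketch below uses the standard pdMS_TO_TICKS() helper from projdefs.h to perform the same conversion; the function name is invented for the illustration.

#include "FreeRTOS.h"
#include "task.h"

void vExampleDelay( void )
{
    /* 500 ms converted to ticks; with a 1000 Hz tick this is 500 ticks. */
    vTaskDelay( pdMS_TO_TICKS( 500 ) );
}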
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
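The region arithmetic above can be checked in isolation. The stand-alone sketch below assumes the default configTOTAL_MPU_REGIONS of 8 and re-derives the counts on a host compiler; the macro values are copied from the header purely for the illustration.

#include <assert.h>

#define configTOTAL_MPU_REGIONS           ( 8UL )
#define portFIRST_CONFIGURABLE_REGION     ( 5UL )
#define portLAST_CONFIGURABLE_REGION      ( configTOTAL_MPU_REGIONS - 1UL )
#define portNUM_CONFIGURABLE_REGIONS      ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 )
#define portTOTAL_NUM_REGIONS             ( portNUM_CONFIGURABLE_REGIONS + 1 )

int main( void )
{
    assert( portLAST_CONFIGURABLE_REGION == 7UL );  /* Regions 5, 6 and 7 are configurable. */
    assert( portNUM_CONFIGURABLE_REGIONS == 3UL );  /* Three regions a task can define. */
    assert( portTOTAL_NUM_REGIONS == 4UL );         /* Plus one region reserved for the stack. */
    return 0;
}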
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
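The attribute macros above are OR-ed together when a region is programmed. The line below is an illustrative combination for a non-shareable, read/write, never-execute data region; the base address 0x20010000 is a placeholder and the 0xFFFFFFE0 mask reflects the RBAR BASE field occupying bits [31:5].

/* Illustrative RBAR value for one configurable data region (placeholder address). */
#define demoREGION_RBAR    ( ( 0x20010000UL & 0xFFFFFFE0UL ) | \
                             portMPU_REGION_NON_SHAREABLE |    \
                             portMPU_REGION_READ_WRITE |       \
                             portMPU_REGION_EXECUTE_NEVER )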
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
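To see where the new priority validation actually fires, consider a typical deferred-interrupt handler. The sketch below is illustrative only: xRxQueue and the handler name are placeholders, but xQueueSendFromISR() and portYIELD_FROM_ISR() are the standard API calls, and the queue FromISR functions invoke portASSERT_IF_INTERRUPT_PRIORITY_INVALID() when configASSERT is defined.

#include "FreeRTOS.h"
#include "queue.h"

extern QueueHandle_t xRxQueue;    /* Placeholder; created elsewhere with xQueueCreate(). */

void UARTx_IRQHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    uint8_t ucByte = 0x55U;       /* Stand-in for data read from the peripheral. */

    /* Runs the priority assert, then posts to the queue from ISR context. */
    xQueueSendFromISR( xRxQueue, &ucByte, &xHigherPriorityTaskWoken );

    /* Expands to portEND_SWITCHING_ISR(), pending PendSV if a higher priority
     * task was unblocked by the send. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}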
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/GCC/ARM_CM23/non_secure/port.c b/portable/GCC/ARM_CM23/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM23/non_secure/port.c +++ b/portable/GCC/ARM_CM23/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. 
*/ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. 
+ * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. 
ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacro.h b/portable/GCC/ARM_CM23/non_secure/portmacro.h index 6a2ed748524..746f734b8ac 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -50,12 +48,17 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif /*-----------------------------------------------------------*/ @@ -63,8 +66,8 @@ /** * @brief Critical section management. 
*/ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. 
*/ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. 
*/ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. 
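The MPU_RBAR attribute macros above are meant to be OR-ed together; purely as an illustration (the variable below is hypothetical and not part of the patch):

/* Sketch: attribute bits for a non-shareable, execute-never region that is
 * read-only at any privilege level. The port would combine a value like this
 * with the region base address when programming MPU_RBAR. */
uint32_t ulExampleRbarAttributes = ( portMPU_REGION_NON_SHAREABLE |   /* 0UL << 3UL */
                                     portMPU_REGION_READ_ONLY |       /* 3UL << 1UL */
                                     portMPU_REGION_EXECUTE_NEVER );  /* 1UL        */
/* = 0x07 with the definitions above. */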
*/ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. 
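To show where the newly wired portASSERT_IF_INTERRUPT_PRIORITY_INVALID() check and the yield macros above come together, here is a sketch of a hypothetical application interrupt handler; the handler and semaphore names are illustrative, not from this patch:

#include "FreeRTOS.h"
#include "semphr.h"

extern SemaphoreHandle_t xExampleSemaphore;   /* Hypothetical handle created elsewhere. */

void vExampleIRQHandler( void )               /* Hypothetical device interrupt handler. */
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* The ...FromISR() APIs call portASSERT_IF_INTERRUPT_PRIORITY_INVALID(),
     * so on a portHAS_BASEPRI port with configASSERT defined this traps
     * interrupts whose priority is above configMAX_SYSCALL_INTERRUPT_PRIORITY. */
    xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );

    /* Expands to portEND_SWITCHING_ISR(), which pends PendSV only when the
     * give unblocked a higher priority task. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}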
*/ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. 
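For orientation, a task that calls secure-side code would use the TrustZone macro above once, before its first secure call; a sketch with an illustrative task name and an assumed secure stack size:

/* Hypothetical task - the name and the secure stack size of 256 are
 * assumptions for illustration only. */
static void prvExampleSecureCallingTask( void * pvParameters )
{
    ( void ) pvParameters;

    /* Expands to vPortAllocateSecureContext() via the macro above. Must run
     * before the task's first call into a secure (NSC) function. */
    portALLOCATE_SECURE_CONTEXT( 256 );

    for( ; ; )
    {
        /* Calls to non-secure callable secure functions would go here. */
    }
}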
*/ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. 
*/ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. 
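A worked pass through the detection logic above, assuming (for illustration only) hardware that implements three priority bits and a configMAX_SYSCALL_INTERRUPT_PRIORITY of 0xA0:

/* Illustration only - not part of the patch.
 *
 * Writing 0xFF to the SHPR2 priority byte reads back as 0xE0 because only
 * the top three bits are implemented, so ucMaxPriorityValue == 0xE0 and the
 * shift loop above counts ulImplementedPrioBits == 3.
 *
 * configMAX_SYSCALL_INTERRUPT_PRIORITY == 0xA0 (priority 5 in the top three
 * bits) masked with 0xE0 is still 0xA0, so ucMaxSysCallPriority is non-zero
 * and the configASSERT() passes.
 *
 * ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - 3 = 4, which becomes 0x400
 * after portPRIGROUP_SHIFT - the largest AIRCR.PRIGROUP setting for which
 * all three implemented bits remain pre-emption priority bits. */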
*/ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. 
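The comments above translate into application-side setup roughly as follows on a Cortex-M33 class device. This is a sketch using standard CMSIS-Core calls; EXAMPLE_IRQn, the priority value and the function name are assumptions, not part of the patch:

void vExampleConfigureInterrupt( void )
{
    /* Use all priority bits as pre-emption priority so the priority
     * grouping assertion above holds. */
    NVIC_SetPriorityGrouping( 0 );

    /* CMSIS takes an unshifted priority value. 5 is assumed here to be equal
     * to or numerically higher than the unshifted value behind
     * configMAX_SYSCALL_INTERRUPT_PRIORITY, which satisfies the
     * ucCurrentPriority >= ucMaxSysCallPriority assertion above. */
    NVIC_SetPriority( EXAMPLE_IRQn, 5 );

    NVIC_EnableIRQ( EXAMPLE_IRQn );
}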
*/ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h index 6a2ed748524..746f734b8ac 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -50,12 +48,17 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __attribute__( ( used ) ) +#define portNORETURN __attribute__( ( noreturn ) ) +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif /*-----------------------------------------------------------*/ @@ -63,8 +66,8 @@ /** * @brief Critical section management. */ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. 
Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
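A quick worked example of the region arithmetic above, using the default configTOTAL_MPU_REGIONS of 8 (numbers only, no new code):

/* With configTOTAL_MPU_REGIONS == 8:
 *   portLAST_CONFIGURABLE_REGION = 8 - 1 = 7
 *   portNUM_CONFIGURABLE_REGIONS = ( 7 - 5 ) + 1 = 3   (regions 5, 6 and 7)
 *   portTOTAL_NUM_REGIONS        = 3 + 1 = 4           (three configurable
 *                                                        regions plus the task
 *                                                        stack region)
 * which matches the four per-task xRegionsSettings entries stored in
 * xMPU_SETTINGS. */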
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/GCC/ARM_CM33/non_secure/port.c b/portable/GCC/ARM_CM33/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM33/non_secure/port.c +++ b/portable/GCC/ARM_CM33/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. 
*/ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. 
+ * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. 
ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacro.h b/portable/GCC/ARM_CM33/non_secure/portmacro.h index 27da496744d..19da9b0ecfe 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -51,10 +49,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
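The include reordering in the portmacro.h hunk above is load-bearing: portmacrocommon.h now tests portHAS_BASEPRI when wiring up portASSERT_IF_INTERRUPT_PRIORITY_INVALID(). A sketch of the required ordering, matching the hunks in this patch:

/* portHAS_BASEPRI must be defined before the common header is included,
 * otherwise the interrupt priority validation hook in portmacrocommon.h is
 * silently compiled out. */
#define portHAS_BASEPRI    1          /* 0 on the Cortex-M23 ports, 1 on Cortex-M33. */

/* ARMv8-M common port configurations. */
#include "portmacrocommon.h"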
*/ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
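 *
 * A minimal illustration, not part of this port, of the function shape these
 * macros expand to - a task created with xTaskCreate() must never return:
 * @code{c}
 * void vAnExampleTask( void * pvParameters )
 * {
 *     for( ; ; )
 *     {
 *         // Task code goes here.
 *     }
 * }
 * @endcode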
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
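+ *
+ * Note that configMAX_SYSCALL_INTERRUPT_PRIORITY must be specified in the
+ * format written to the hardware priority registers, i.e. shifted into the
+ * most significant implemented bits. For example, assuming 4 implemented
+ * priority bits, a logical priority of 5 is expressed as ( 5 << 4 ) = 0x50.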
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
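+         * As a worked example, a core that implements 3 priority bits reads
+         * back 0xE0 here, the loop below therefore counts 3 implemented
+         * bits, and ulMaxPRIGROUPValue becomes 7 - 3 = 4, i.e. every
+         * implemented bit remains a pre-emption priority bit.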
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
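+             * Exception numbers 0 to 15 are the fixed system exceptions
+             * (reset, NMI, faults, SVCall, PendSV, SysTick and so on), while
+             * external peripheral interrupts start at exception number 16,
+             * hence portFIRST_USER_INTERRUPT_NUMBER.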
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h index 27da496744d..19da9b0ecfe 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -51,10 +49,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
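 *
 * A sketch of typical application level usage, illustrative only - the
 * taskENTER_CRITICAL()/taskEXIT_CRITICAL() wrappers in task.h resolve to the
 * port layer's critical section macros:
 * @code{c}
 * taskENTER_CRITICAL();
 * {
 *     // Access data shared with other tasks here.
 * }
 * taskEXIT_CRITICAL();
 * @endcode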
*/ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
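 * (For example, with configTICK_RATE_HZ set to 1000 the portTICK_PERIOD_MS
 * definition below evaluates to 1, i.e. one millisecond per tick.)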
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
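 * The configurable regions below are the ones an application can assign on a
 * per task basis, for example through the xRegions member of the
 * TaskParameters_t structure passed to xTaskCreateRestricted(), or at run
 * time with vTaskAllocateMPURegions().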
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
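 *
 * Both macros expand to a plain 'void vFunction( void * pvParameters )'
 * definition on this architecture; they exist so that ports needing extra
 * compiler specific keywords on task functions can add them without changing
 * application code.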
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/GCC/ARM_CM35P/non_secure/port.c b/portable/GCC/ARM_CM35P/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM35P/non_secure/port.c +++ b/portable/GCC/ARM_CM35P/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
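+ *
+ * portNVIC_SHPR2_REG addresses System Handler Priority Register 2, whose
+ * most significant byte holds the SVCall priority. xPortStartScheduler()
+ * writes to it only to discover how many priority bits the hardware
+ * implements, and then restores the original value.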
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
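+         * The PRIGROUP field of the AIRCR register selects how an 8-bit
+         * priority is split between pre-emption priority and sub-priority
+         * bits; ulMaxPRIGROUPValue is the largest setting for which every
+         * implemented priority bit is still a pre-emption priority bit.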
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
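+             * pcInterruptPriorityRegisters points 16 bytes before the NVIC
+             * IPR block so that it can be indexed directly with the exception
+             * number read from IPSR (exception number = IRQ number + 16).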
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacro.h b/portable/GCC/ARM_CM35P/non_secure/portmacro.h index dfc8365305e..cc643459770 100644 --- a/portable/GCC/ARM_CM35P/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM35P/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -51,10 +49,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M35P" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
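 *
 * Inside interrupt handlers, critical sections use the FROM_ISR variants
 * (portSET_INTERRUPT_MASK_FROM_ISR()/portCLEAR_INTERRUPT_MASK_FROM_ISR())
 * instead, normally via the task.h wrappers - an illustrative sketch:
 * @code{c}
 * UBaseType_t uxSavedInterruptStatus;
 *
 * uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
 * {
 *     // Access data shared with tasks here.
 * }
 * taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
 * @endcode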
*/ diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
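+ *
+ * Background on the constants added below: portNVIC_SHPR2_REG addresses
+ * SHPR2, whose most significant byte holds the SVCall priority.
+ * xPortStartScheduler() briefly writes 0xFF to that byte, reads it back to
+ * discover how many priority bits the device implements, and then restores
+ * the original value. portTOP_BIT_OF_BYTE and portMAX_PRIGROUP_BITS support
+ * that bit counting and the later AIRCR priority group check.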
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
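+ *
+ * Worked example, purely for illustration, for a device with four
+ * implemented priority bits: the 0xFF written above reads back as 0xF0, so
+ * the loop below shifts out four set bits and ulImplementedPrioBits ends up
+ * as 4. A configMAX_SYSCALL_INTERRUPT_PRIORITY of 0x50 masked with 0xF0
+ * stays 0x50, so the assert above passes, and ulMaxPRIGROUPValue is then
+ * calculated as 7 - 4 = 3.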
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
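+ *
+ * IPSR holds the exception number: values below 16 are system exceptions
+ * (SysTick is 15), and external interrupt n reads as n + 16, which is why
+ * portFIRST_USER_INTERRUPT_NUMBER is 16. Because
+ * pcInterruptPriorityRegisters points 16 bytes before the NVIC IPR
+ * registers, indexing it with this exception number lands on the priority
+ * byte of the interrupt that is actually executing.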
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h index dfc8365305e..cc643459770 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -51,10 +49,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M35P" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
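 *
 * Usage sketch, with ulSharedCount standing in for any data shared with an
 * interrupt or another task: task level code protects short accesses with
 * @code{c}
 * taskENTER_CRITICAL();
 * ulSharedCount++;
 * taskEXIT_CRITICAL();
 * @endcode
 * which resolve to the port critical section macros declared in
 * portmacrocommon.h, included above.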
*/ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
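 *
 * With the default configTOTAL_MPU_REGIONS of 8, regions 0 to 4 are the
 * five fixed kernel regions named below, portFIRST_CONFIGURABLE_REGION is 5,
 * portLAST_CONFIGURABLE_REGION is 7, portNUM_CONFIGURABLE_REGIONS is 3 and
 * portTOTAL_NUM_REGIONS is 4, the three configurable regions plus the per
 * task stack region.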
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
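 *
 * A note on portSUPPRESS_TICKS_AND_SLEEP() just above: the #ifndef guard
 * lets the application provide its own low power handler before this
 * default is used. As a hedged sketch, where vApplicationSleep is a
 * hypothetical application supplied function rather than part of this port,
 * FreeRTOSConfig.h could contain
 * @code{c}
 * #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vApplicationSleep( xExpectedIdleTime )
 * @endcode
 * which the idle task then calls when configUSE_TICKLESS_IDLE is non-zero
 * and a sufficiently long idle period is expected.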
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/GCC/ARM_CM55/non_secure/port.c b/portable/GCC/ARM_CM55/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM55/non_secure/port.c +++ b/portable/GCC/ARM_CM55/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
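+ *
+ * The AIRCR PRIGROUP field selects how many of the low order priority bits
+ * are treated as sub-priority rather than preemption priority. The largest
+ * value that still keeps every implemented bit as preemption priority is
+ * 7 minus the number of implemented bits, which is what the else branch
+ * below computes; vPortValidateInterruptPriority() later compares that
+ * limit against the PRIGROUP value actually programmed into AIRCR.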
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
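+ *
+ * Application side sketch, where EXAMPLE_IRQn and the priority value 6 are
+ * placeholders and a CMSIS device header is assumed: an interrupt that
+ * calls FromISR APIs would typically be configured with
+ * NVIC_SetPriorityGrouping( 0 );
+ * NVIC_SetPriority( EXAMPLE_IRQn, 6 );
+ * NVIC_EnableIRQ( EXAMPLE_IRQn );
+ * where 6, once CMSIS shifts it into the implemented priority bits, must be
+ * numerically greater than or equal to configMAX_SYSCALL_INTERRUPT_PRIORITY
+ * so that the assert below passes.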
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacro.h b/portable/GCC/ARM_CM55/non_secure/portmacro.h index 3cde19a73d5..c9bad40cf98 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -56,10 +54,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M55" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
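 *
 * Inside an interrupt, critical sections use the mask and restore form
 * (the variable names below are placeholders for illustration):
 * @code{c}
 * UBaseType_t uxSavedInterruptStatus;
 *
 * uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
 * ulSharedCount++;
 * taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
 * @endcode
 * which resolves to the portSET_INTERRUPT_MASK_FROM_ISR() and
 * portCLEAR_INTERRUPT_MASK_FROM_ISR() macros defined in portmacrocommon.h.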
*/ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
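 *
 * On the scheduler utilities defined above: a deferred interrupt handler
 * typically ends with portYIELD_FROM_ISR(). Hedged sketch only, where
 * vExampleIRQHandler and xExampleSemaphore are placeholders created
 * elsewhere by the application:
 * @code{c}
 * void vExampleIRQHandler( void )
 * {
 *     BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *     xSemaphoreGiveFromISR( xExampleSemaphore, &xHigherPriorityTaskWoken );
 *     portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 * }
 * @endcode
 * so the PendSV request made by portEND_SWITCHING_ISR() is raised only when
 * a higher priority task was actually woken.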
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
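The probe added to xPortStartScheduler() writes all ones to the SVCall priority field in SHPR2 and reads the value back; only the implemented priority bits retain the value, so counting the leading one bits gives the number of priority bits the hardware provides. A condensed, standalone sketch of the same idea is shown below; prvCountImplementedPriorityBits is a hypothetical helper and the register defines mirror the ones added in this patch.

    #include <stdint.h>

    #define portNVIC_SHPR2_REG     ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
    #define portTOP_BIT_OF_BYTE    ( ( uint8_t ) 0x80 )

    static uint32_t prvCountImplementedPriorityBits( void )
    {
        uint32_t ulOriginal = portNVIC_SHPR2_REG;   /* Save the value about to be clobbered. */
        uint32_t ulBits = 0;
        uint8_t ucValue;

        portNVIC_SHPR2_REG = 0xFF000000;            /* Write all ones to the SVCall priority field. */
        ucValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );

        /* Count how many of the most significant bits were actually stored. */
        while( ( ucValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
        {
            ulBits++;
            ucValue <<= 1;
        }

        portNVIC_SHPR2_REG = ulOriginal;            /* Restore the clobbered register. */

        return ulBits;
    }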
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h index 3cde19a73d5..c9bad40cf98 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -56,10 +54,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M55" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
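With vPortValidateInterruptPriority() wired into portASSERT_IF_INTERRUPT_PRIORITY_INVALID(), any interrupt that calls a FromISR API must be assigned a priority numerically at or above configMAX_SYSCALL_INTERRUPT_PRIORITY, and all priority bits must be configured as preemption bits. A sketch of a compliant setup, assuming the CMSIS NVIC helpers from the device header are available; the function name, IRQ number and priority value are placeholders.

    #include "FreeRTOS.h"    /* Pulls in FreeRTOSConfig.h. */

    /* The device's CMSIS header, which provides NVIC_SetPriority() and TIMER0_IRQn,
     * is assumed to be included by the application. */
    void vConfigureTimerInterrupt( void )
    {
        /* Use preemption priority bits only, as the priority grouping assert requires. */
        NVIC_SetPriorityGrouping( 0 );

        /* 5 is a placeholder; it must be numerically at or above the unshifted priority
         * from which configMAX_SYSCALL_INTERRUPT_PRIORITY is derived. */
        NVIC_SetPriority( TIMER0_IRQn, 5 );
        NVIC_EnableIRQ( TIMER0_IRQn );
    }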
*/ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
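The RBAR attribute macros above are the values an application places in the ulParameters member of a MemoryRegion_t when creating an MPU restricted task. A minimal sketch, assuming configENABLE_MPU is 1 and the default three configurable regions; the task body, buffer names and sizes are placeholders.

    #include "FreeRTOS.h"
    #include "task.h"

    static StackType_t xTaskStack[ 256 ] __attribute__( ( aligned( 32 ) ) );
    static uint8_t ucSharedBuffer[ 512 ] __attribute__( ( aligned( 32 ) ) );

    static void prvRestrictedTask( void * pvParameters )
    {
        ( void ) pvParameters;

        for( ; ; )
        {
            /* Only ucSharedBuffer (and the task's own stack) is writable from here. */
        }
    }

    static const TaskParameters_t xRestrictedTaskParameters =
    {
        prvRestrictedTask,          /* pvTaskCode */
        "Restricted",               /* pcName */
        256,                        /* usStackDepth */
        NULL,                       /* pvParameters */
        2,                          /* uxPriority */
        xTaskStack,                 /* puxStackBuffer */
        {
            /* Base address,   Length,  Parameters (RBAR attributes). */
            { ucSharedBuffer,  512,     portMPU_REGION_READ_WRITE | portMPU_REGION_EXECUTE_NEVER },
            { 0,               0,       0 },
            { 0,               0,       0 }
        }
    };

    void vCreateRestrictedTask( void )
    {
        xTaskCreateRestricted( &xRestrictedTaskParameters, NULL );
    }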
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/GCC/ARM_CM85/non_secure/port.c b/portable/GCC/ARM_CM85/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM85/non_secure/port.c +++ b/portable/GCC/ARM_CM85/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacro.h b/portable/GCC/ARM_CM85/non_secure/portmacro.h index 0fd11fcdf3a..c45dd21c29e 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -56,10 +54,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M85" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
*/ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h index 0fd11fcdf3a..c45dd21c29e 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -56,10 +54,15 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M85" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
*/ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
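configTICK_TYPE_WIDTH_IN_BITS selects the TickType_t width used above. A minimal FreeRTOSConfig.h sketch; the 32-bit setting is the usual choice on these cores:

    /* FreeRTOSConfig.h */
    #define configTICK_TYPE_WIDTH_IN_BITS    TICK_TYPE_WIDTH_32_BITS

    /* Result: typedef uint32_t TickType_t, portMAX_DELAY == 0xffffffffUL, and
     * tick count reads do not need to be wrapped in a critical section. */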
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
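With the default of eight MPU regions, the region macros that follow work out as below (a worked example of the arithmetic only):

    /* configTOTAL_MPU_REGIONS = 8:
     *   portFIRST_CONFIGURABLE_REGION = 5
     *   portLAST_CONFIGURABLE_REGION  = 8 - 1 = 7
     *   portNUM_CONFIGURABLE_REGIONS  = ( 7 - 5 ) + 1 = 3   -> three per-task regions
     *   portTOTAL_NUM_REGIONS         = 3 + 1 = 4            -> plus the task stack region */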
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
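The attribute macros above map onto the ARMv8-M region registers (RBAR holds SH in bits 4:3, AP in bits 2:1 and XN in bit 0; RLAR holds the attribute index in bits 3:1 and the enable bit in bit 0). A hedged sketch of composing one region; the addresses and the raw RLAR bit values are illustrative only:

    uint32_t ulRBAR, ulRLAR;

    /* Example region 0x40000000..0x40000FFF: read-write, never executable. */
    ulRBAR = ( 0x40000000UL & 0xFFFFFFE0UL ) |
             portMPU_REGION_NON_SHAREABLE |
             portMPU_REGION_READ_WRITE |
             portMPU_REGION_EXECUTE_NEVER;

    ulRLAR = ( 0x40000FFFUL & 0xFFFFFFE0UL ) |
             ( 1UL << 1UL ) |    /* Attribute index 1 - selects a byte of MPU_MAIR0. */
             1UL;                /* Region enable. */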
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
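portEND_SWITCHING_ISR()/portYIELD_FROM_ISR() above simply pend PendSV when a context switch is required. The usual pattern from an interrupt handler looks like the following sketch; the handler name and semaphore are assumed to be defined by the application:

    #include "FreeRTOS.h"
    #include "semphr.h"

    extern SemaphoreHandle_t xDataReadySemaphore;   /* Created during initialisation. */

    void vExampleIRQHandler( void )
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        /* Wake the task that processes the data. */
        xSemaphoreGiveFromISR( xDataReadySemaphore, &xHigherPriorityTaskWoken );

        /* Pends PendSV only if giving the semaphore unblocked a higher priority task. */
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }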
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/IAR/ARM_CM23/non_secure/port.c b/portable/IAR/ARM_CM23/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM23/non_secure/port.c +++ b/portable/IAR/ARM_CM23/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
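For reference, the portTASK_FUNCTION()/portTASK_FUNCTION_PROTO() macros reformatted in the portmacrocommon.h hunks above are the portable way to declare and define a task function; on this port they simply expand to void vFunction( void * pvParameters ). The task name below is illustrative:

    portTASK_FUNCTION_PROTO( vExampleTask, pvParameters );

    portTASK_FUNCTION( vExampleTask, pvParameters )
    {
        for( ; ; )
        {
            /* Task code goes here. */
        }
    }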
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
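The masking and the non-zero assert above assume configMAX_SYSCALL_INTERRUPT_PRIORITY is expressed as the value actually written to a priority register, i.e. shifted up into the implemented bits. A common FreeRTOSConfig.h sketch assuming three implemented priority bits; the configLIBRARY_* helper name is illustrative rather than required:

    #define configPRIO_BITS                                 3
    #define configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY    5

    /* 5 << ( 8 - 3 ) == 0xA0: survives the 0xE0 mask, so the assert passes. */
    #define configMAX_SYSCALL_INTERRUPT_PRIORITY \
        ( configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY << ( 8 - configPRIO_BITS ) )

    /* A raw value such as 0x01 would mask to zero and trip configASSERT(). */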
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM23/non_secure/portmacro.h b/portable/IAR/ARM_CM23/non_secure/portmacro.h index b4d11dfdbcf..9cf0e87fbc8 100644 --- a/portable/IAR/ARM_CM23/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM23/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -50,11 +48,16 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __root +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. 
#endif /*-----------------------------------------------------------*/ @@ -62,8 +65,8 @@ /** * @brief Critical section management. */ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in diff --git a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. 
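The three #error checks above mean every ARMv8-M application has to set these switches explicitly. A minimal FreeRTOSConfig.h sketch for a build without the MPU or TrustZone:

    /* FreeRTOSConfig.h */
    #define configENABLE_FPU          1
    #define configENABLE_MPU          0
    #define configENABLE_TRUSTZONE    0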
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
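portTICK_PERIOD_MS above is what keeps time conversions in application code portable across tick rates. Both of the following delay for roughly 250 ms regardless of configTICK_RATE_HZ; pdMS_TO_TICKS() is the preferred form:

    vTaskDelay( pdMS_TO_TICKS( 250 ) );

    vTaskDelay( 250 / portTICK_PERIOD_MS );   /* Older, equivalent style. */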
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
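The per-task configurable regions are what xTaskCreateRestricted() fills in from the xRegions member of TaskParameters_t. A heavily simplified sketch assuming the default three configurable regions and a task function vExampleTask() defined elsewhere; the names, addresses, sizes and access parameters are illustrative only:

    static StackType_t uxTaskStack[ 256 ];

    static const TaskParameters_t xRestrictedTaskParams =
    {
        .pvTaskCode     = vExampleTask,
        .pcName         = "Restricted",
        .usStackDepth   = 256,
        .pvParameters   = NULL,
        .uxPriority     = 1,
        .puxStackBuffer = uxTaskStack,
        .xRegions       =
        {
            /* Base address, length in bytes, access parameters. */
            { ( void * ) 0x40000000, 0x1000, portMPU_REGION_READ_WRITE | portMPU_REGION_EXECUTE_NEVER },
            { NULL, 0, 0 },
            { NULL, 0, 0 }
        }
    };

    xTaskCreateRestricted( &xRestrictedTaskParams, NULL );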
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
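ulMAIR0 above packs up to four memory attribute encodings, one per byte, and each region's RLAR attribute index selects which byte applies to that region. A sketch; the index assignments are illustrative:

    uint32_t ulMAIR0;

    ulMAIR0 = ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << 0 ) |   /* Attr0: normal flash/RAM. */
              ( portMPU_DEVICE_MEMORY_nGnRE << 8 );                   /* Attr1: peripherals. */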
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
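A non-secure task that intends to call into the secure side must allocate a secure context before its first secure call. A sketch of the usual pattern; the task name and stack size value are illustrative:

    void vSecureCallingTask( void * pvParameters )
    {
        /* One-time allocation of this task's secure-side stack and context. */
        portALLOCATE_SECURE_CONTEXT( 256 );

        for( ; ; )
        {
            /* Calls to non-secure-callable (NSC) secure functions can now be made. */
        }
    }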
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. 
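The privilege macros in the portmacrocommon.h hunk above are what the MPU API wrappers use to enter privileged mode temporarily around kernel calls. The idiom is roughly the sketch below; note that, as the comment above portRAISE_PRIVILEGE() says, the SVC only grants privilege when raised from the system call section:

    BaseType_t xRunningPrivileged = portIS_PRIVILEGED();

    if( xRunningPrivileged == 0 )
    {
        portRAISE_PRIVILEGE();
    }

    /* ... perform the privileged operation ... */

    if( xRunningPrivileged == 0 )
    {
        portRESET_PRIVILEGE();
    }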
*/ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. 
+ * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. 
ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h index b4d11dfdbcf..9cf0e87fbc8 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -50,11 +48,16 @@ /** * Architecture specifics. */ -#define portARCH_NAME "Cortex-M23" -#define portDONT_DISCARD __root +#define portARCH_NAME "Cortex-M23" +#define portHAS_BASEPRI 0 +#define portDONT_DISCARD __root +/*-----------------------------------------------------------*/ + +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" /*-----------------------------------------------------------*/ -#if( configTOTAL_MPU_REGIONS == 16 ) +#if ( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. #endif /*-----------------------------------------------------------*/ @@ -62,8 +65,8 @@ /** * @brief Critical section management. 
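 *
 * The Cortex-M23 implements the ARMv8-M Baseline profile, which has no
 * BASEPRI register; that is why portHAS_BASEPRI is defined as 0 above and the
 * macros below mask interrupts globally with PRIMASK (cpsid/cpsie) rather
 * than with a priority threshold.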
*/ -#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) -#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) +#define portDISABLE_INTERRUPTS() __asm volatile ( " cpsid i " ::: "memory" ) +#define portENABLE_INTERRUPTS() __asm volatile ( " cpsie i " ::: "memory" ) /*-----------------------------------------------------------*/ /* Suppress warnings that are generated by the IAR tools, but cannot be fixed in diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. 
*/ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. */ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. 
*/ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. */ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. 
*/ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. */ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. 
*/ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/IAR/ARM_CM33/non_secure/port.c b/portable/IAR/ARM_CM33/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM33/non_secure/port.c +++ b/portable/IAR/ARM_CM33/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. + */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. 
*/ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. */ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. 
+ * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? */ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. 
ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM33/non_secure/portmacro.h b/portable/IAR/ARM_CM33/non_secure/portmacro.h index cbfa60241fa..380768fc03b 100644 --- a/portable/IAR/ARM_CM33/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM33/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -51,6 +49,7 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ @@ -59,6 +58,10 @@ #endif /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + /** * @brief Critical section management. 
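 *
 * The Cortex-M33 implements the ARMv8-M Mainline profile, which does provide
 * the BASEPRI register; portHAS_BASEPRI is therefore set to 1 above, and that
 * in turn enables the vPortValidateInterruptPriority() checks defined in
 * port.c.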
*/ diff --git a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
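 *
 * For reference: portNVIC_SHPR2_REG ( 0xE000ED1C ) holds the SVCall priority
 * in its most significant byte, which is why it can be written and read back
 * below to discover how many priority bits the device implements;
 * portNVIC_IP_REGISTERS_OFFSET_16 is the NVIC_IPR base address ( 0xE000E400 )
 * minus 16 so that the array can be indexed directly with the exception
 * number read from IPSR; and portAIRCR_REG ( 0xE000ED0C ) contains the
 * PRIGROUP field in bits [10:8], which the priority grouping assertion
 * examines.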
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
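 * Exception numbers 1 to 15 are the fixed system exceptions (SVCall, PendSV,
 * SysTick and so on); numbers 16 and above are the device specific IRQs,
 * which is the range portFIRST_USER_INTERRUPT_NUMBER ( 16 ) selects.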
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h index cbfa60241fa..815dca0861a 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -51,9 +49,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M33" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. 
#endif diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/IAR/ARM_CM35P/non_secure/port.c b/portable/IAR/ARM_CM35P/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM35P/non_secure/port.c +++ b/portable/IAR/ARM_CM35P/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P/non_secure/portmacro.h b/portable/IAR/ARM_CM35P/non_secure/portmacro.h index 70b73a31794..46bc4e24b56 100644 --- a/portable/IAR/ARM_CM35P/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM35P/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -51,9 +49,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M35P" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. 
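
Editor's note: the two configASSERT() calls added to xPortStartScheduler() above constrain how configMAX_SYSCALL_INTERRUPT_PRIORITY is encoded. The fragment below is a FreeRTOSConfig.h sketch in the style commonly used by the Cortex-M demos; the configLIBRARY_* name and the value 3 for configPRIO_BITS are assumptions for illustration only.

    /* FreeRTOSConfig.h (sketch).  The device implements configPRIO_BITS
     * priority bits, stored in the most significant bits of each 8-bit
     * priority field. */
    #define configPRIO_BITS                              3   /* Usually equal to __NVIC_PRIO_BITS. */

    /* Unshifted priority, as passed to CMSIS NVIC_SetPriority(). */
    #define configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY 5

    /* Shifted value seen by the kernel: 5 << ( 8 - 3 ) = 0xA0.  Masking with
     * the implemented bits ( 0xE0 ) leaves 0xA0, so the "must not be zero"
     * assert passes.  On a device that implemented all 8 priority bits the
     * value would also need its least significant bit clear to satisfy the
     * new sub-priority assert. */
    #define configMAX_SYSCALL_INTERRUPT_PRIORITY \
        ( configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY << ( 8 - configPRIO_BITS ) )
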
#endif diff --git a/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h index 70b73a31794..46bc4e24b56 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -51,9 +49,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M35P" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. 
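
Editor's note: to make the priority-bit probe in xPortStartScheduler() above easier to follow in isolation, the helper below reproduces the bit-counting loop on its own. The function name is hypothetical and the routine is illustrative only; the port itself performs this count inline on the value read back from portNVIC_SHPR2_REG. On QEMU targets, where the priority bits are not modelled, the cross-check against __NVIC_PRIO_BITS/configPRIO_BITS can be skipped by defining configDISABLE_INTERRUPT_PRIO_BITS_CHECK in FreeRTOSConfig.h, as noted in the comments above.

    #include <stdint.h>

    /* Illustrative only: count how many of the 0xFF bits written to an 8-bit
     * priority field were actually implemented by the hardware, mirroring the
     * while loop used in xPortStartScheduler(). */
    static uint32_t prvCountImplementedPriorityBits( uint8_t ucReadBack )
    {
        uint32_t ulImplementedPrioBits = 0;

        while( ( ucReadBack & 0x80U ) == 0x80U )
        {
            ulImplementedPrioBits++;
            ucReadBack = ( uint8_t ) ( ucReadBack << 1 );
        }

        /* Examples: 0xE0 -> 3 bits, 0xF0 -> 4 bits, 0xFF -> 8 bits. */
        return ulImplementedPrioBits;
    }
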
#endif diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
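Worked through for the default configTOTAL_MPU_REGIONS of 8, the region macros above expand to the following values (a numeric illustration only):

    /* portLAST_CONFIGURABLE_REGION  = 8 - 1          = 7                                   */
    /* portNUM_CONFIGURABLE_REGIONS  = ( 7 - 5 ) + 1  = 3 configurable regions per task     */
    /* portTOTAL_NUM_REGIONS         = 3 + 1          = 4, which sizes xRegionsSettings[]   */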
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
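As a usage sketch of the critical-section macros defined above (standard FreeRTOS API rather than anything introduced by this patch; the counter and function names are made up for illustration):

    #include "FreeRTOS.h"
    #include "task.h"

    static volatile uint32_t ulSharedCounter = 0;       /* hypothetical shared state */

    void vBumpFromTask( void )
    {
        taskENTER_CRITICAL();                            /* -> vPortEnterCritical()  */
        ulSharedCounter++;
        taskEXIT_CRITICAL();                             /* -> vPortExitCritical()   */
    }

    void vBumpFromISR( void )
    {
        UBaseType_t uxSavedMask;

        uxSavedMask = taskENTER_CRITICAL_FROM_ISR();     /* -> ulSetInterruptMask()  */
        ulSharedCounter++;
        taskEXIT_CRITICAL_FROM_ISR( uxSavedMask );       /* -> vClearInterruptMask() */
    }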
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/IAR/ARM_CM55/non_secure/port.c b/portable/IAR/ARM_CM55/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM55/non_secure/port.c +++ b/portable/IAR/ARM_CM55/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
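When configENABLE_TRUSTZONE is 1, a task that calls into the secure image is expected to allocate a secure context before its first secure call, which is what portALLOCATE_SECURE_CONTEXT() above is for. A hedged sketch, in which the stack size, the secure function and the task name are all assumptions:

    #include "FreeRTOS.h"
    #include "task.h"

    extern uint32_t NSCReadSensor( void );               /* hypothetical non-secure-callable function */

    void vSecureClientTask( void * pvParameters )
    {
        ( void ) pvParameters;

        /* Ask the secure side for a context before the first secure call.
         * 256 is an assumed size - use whatever the secure firmware requires. */
        portALLOCATE_SECURE_CONTEXT( 256 );

        for( ;; )
        {
            ( void ) NSCReadSensor();
            vTaskDelay( pdMS_TO_TICKS( 100 ) );
        }
    }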
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
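To make the priority-bit probe above concrete, here is how the values work out on an assumed device that implements three priority bits (an illustration, not measured output):

    /* Writing 0xFF000000 to SHPR2 reads back as 0xE0000000: only the top 3 bits stick.      */
    /* ucMaxPriorityValue = 0xE0, so the while loop runs 3 times and ulImplementedPrioBits = 3. */
    /* ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - 3 = 4, then ( 4 << 8 ) & 0x0700 = 0x0400.  */
    /* A configMAX_SYSCALL_INTERRUPT_PRIORITY of ( 5 << 5 ) = 0xA0 masks to 0xA0, so the       */
    /* configASSERT( ucMaxSysCallPriority ) check passes, and any AIRCR PRIGROUP value of 4 or */
    /* below satisfies the priority-grouping assertion in vPortValidateInterruptPriority().    */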
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM55/non_secure/portmacro.h b/portable/IAR/ARM_CM55/non_secure/portmacro.h index 7783218fe0b..7829ee6186a 100644 --- a/portable/IAR/ARM_CM55/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM55/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -56,9 +54,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M55" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. 
#endif diff --git a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
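The portASSERT_IF_INTERRUPT_PRIORITY_INVALID() hook declared above is invoked inside the kernel's interrupt-safe ("FromISR") functions, so an interrupt handler that is expected to pass the new check typically looks like the following sketch (the handler, queue and register-read names are assumptions for illustration):

    #include "FreeRTOS.h"
    #include "queue.h"

    extern QueueHandle_t xRxQueue;                       /* assumed to be created with xQueueCreate() */
    extern uint32_t ulReadRxData( void );                /* hypothetical peripheral register read     */

    void UART0_IRQHandler( void )                        /* hypothetical device interrupt             */
    {
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;
        uint8_t ucByte = ( uint8_t ) ulReadRxData();

        /* Only valid if this interrupt's priority is numerically at or above
         * configMAX_SYSCALL_INTERRUPT_PRIORITY; otherwise the new
         * vPortValidateInterruptPriority() assertion fires. */
        xQueueSendFromISR( xRxQueue, &ucByte, &xHigherPriorityTaskWoken );

        /* Pend PendSV via portEND_SWITCHING_ISR() if a higher priority task was woken. */
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }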
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
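Both assertions in vPortValidateInterruptPriority(), shown in full earlier in this patch, are normally satisfied by configuring interrupts through CMSIS before the scheduler starts. A sketch, in which the IRQ number is hypothetical and 5 is an assumed logical priority matching a configMAX_SYSCALL_INTERRUPT_PRIORITY of ( 5 << ( 8 - __NVIC_PRIO_BITS ) ):

    /* Run once during hardware setup, before vTaskStartScheduler(). */
    NVIC_SetPriorityGrouping( 0 );        /* all priority bits become preemption bits             */
    NVIC_SetPriority( UART0_IRQn, 5 );    /* logical priority no higher than the syscall maximum  */
    NVIC_EnableIRQ( UART0_IRQn );         /* UART0_IRQn is a hypothetical device IRQ              */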
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h index 7783218fe0b..7829ee6186a 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -56,9 +54,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M55" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. 
#endif diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/IAR/ARM_CM85/non_secure/port.c b/portable/IAR/ARM_CM85/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM85/non_secure/port.c +++ b/portable/IAR/ARM_CM85/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM85/non_secure/portmacro.h b/portable/IAR/ARM_CM85/non_secure/portmacro.h index 7aba706018e..3b51cb5ff46 100644 --- a/portable/IAR/ARM_CM85/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM85/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -56,9 +54,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M85" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. 
#endif diff --git a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c index 9976daee49a..7bbe1b7bc53 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c @@ -94,6 +94,19 @@ #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants required to check the validity of an interrupt priority. 
+ */ +#define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xE000ED1C ) ) +#define portFIRST_USER_INTERRUPT_NUMBER ( 16 ) +#define portNVIC_IP_REGISTERS_OFFSET_16 ( 0xE000E3F0 ) +#define portAIRCR_REG ( *( ( volatile uint32_t * ) 0xE000ED0C ) ) +#define portTOP_BIT_OF_BYTE ( ( uint8_t ) 0x80 ) +#define portMAX_PRIGROUP_BITS ( ( uint8_t ) 7 ) +#define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) +#define portPRIGROUP_SHIFT ( 8UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -369,6 +382,19 @@ PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL; PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT; #endif /* configENABLE_TRUSTZONE */ +/** + * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure + * FreeRTOS API functions are not called from interrupts that have been assigned + * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY. + */ +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + static uint8_t ucMaxSysCallPriority = 0; + static uint32_t ulMaxPRIGROUPValue = 0; + static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16; + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + #if ( configUSE_TICKLESS_IDLE == 1 ) /** @@ -944,6 +970,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } } /*-----------------------------------------------------------*/ + /* *INDENT-OFF* */ #if ( configENABLE_MPU == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -1069,6 +1096,114 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ { + #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + { + volatile uint32_t ulOriginalPriority; + volatile uint32_t ulImplementedPrioBits = 0; + volatile uint8_t ucMaxPriorityValue; + + /* Determine the maximum priority from which ISR safe FreeRTOS API + * functions can be called. ISR safe functions are those that end in + * "FromISR". FreeRTOS maintains separate thread and ISR API functions to + * ensure interrupt entry is as fast and simple as possible. + * + * Save the interrupt priority value that is about to be clobbered. */ + ulOriginalPriority = portNVIC_SHPR2_REG; + + /* Determine the number of priority bits available. First write to all + * possible bits. */ + portNVIC_SHPR2_REG = 0xFF000000; + + /* Read the value back to see how many bits stuck. */ + ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 ); + + /* Use the same mask on the maximum system call priority. */ + ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue; + + /* Check that the maximum system call priority is nonzero after + * accounting for the number of priority bits supported by the + * hardware. A priority of 0 is invalid because setting the BASEPRI + * register to 0 unmasks all interrupts, and interrupts with priority 0 + * cannot be masked using BASEPRI. + * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + configASSERT( ucMaxSysCallPriority ); + + /* Calculate the maximum acceptable priority group value for the number + * of bits read back. 
*/ + + while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE ) + { + ulImplementedPrioBits++; + ucMaxPriorityValue <<= ( uint8_t ) 0x01; + } + + if( ulImplementedPrioBits == 8 ) + { + /* When the hardware implements 8 priority bits, there is no way for + * the software to configure PRIGROUP to not have sub-priorities. As + * a result, the least significant bit is always used for sub-priority + * and there are 128 preemption priorities and 2 sub-priorities. + * + * This may cause some confusion in some cases - for example, if + * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4 + * priority interrupts will be masked in Critical Sections as those + * are at the same preemption priority. This may appear confusing as + * 4 is higher (numerically lower) priority than + * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not + * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY + * to 4, this confusion does not happen and the behaviour remains the same. + * + * The following assert ensures that the sub-priority bit in the + * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned + * confusion. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U ); + ulMaxPRIGROUPValue = 0; + } + else + { + ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; + } + + /* The interrupt priority bits are not modelled in QEMU and the assert that + * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. + * Therefore, this assert is not adding any value for QEMU targets. The config + * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the + * `FreeRTOSConfig.h` for QEMU targets. */ + #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK + { + #ifdef __NVIC_PRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); + } + #endif /* __NVIC_PRIO_BITS */ + + #ifdef configPRIO_BITS + { + /* + * Check that the number of implemented priority bits queried from + * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. + */ + configASSERT( ulImplementedPrioBits == configPRIO_BITS ); + } + #endif /* configPRIO_BITS */ + } + #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ + + /* Shift the priority group value back to its position within the AIRCR + * register. */ + ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; + ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK; + + /* Restore the clobbered interrupt priority register to its original + * value. */ + portNVIC_SHPR2_REG = ulOriginalPriority; + } + #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ + /* Make PendSV, CallSV and SysTick the same priority as the kernel. */ portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI; portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI; @@ -1259,3 +1394,64 @@ BaseType_t xPortIsInsideInterrupt( void ) return xReturn; } /*-----------------------------------------------------------*/ + +#if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) + + void vPortValidateInterruptPriority( void ) + { + uint32_t ulCurrentInterrupt; + uint8_t ucCurrentPriority; + + /* Obtain the number of the currently executing interrupt. */ + __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" ); + + /* Is the interrupt number a user defined interrupt? 
*/ + if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER ) + { + /* Look up the interrupt's priority. */ + ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ]; + + /* The following assertion will fail if a service routine (ISR) for + * an interrupt that has been assigned a priority above + * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API + * function. ISR safe FreeRTOS API functions must *only* be called + * from interrupts that have been assigned a priority at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Numerically low interrupt priority numbers represent logically high + * interrupt priorities, therefore the priority of the interrupt must + * be set to a value equal to or numerically *higher* than + * configMAX_SYSCALL_INTERRUPT_PRIORITY. + * + * Interrupts that use the FreeRTOS API must not be left at their + * default priority of zero as that is the highest possible priority, + * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY, + * and therefore also guaranteed to be invalid. + * + * FreeRTOS maintains separate thread and ISR API functions to ensure + * interrupt entry is as fast and simple as possible. + * + * The following links provide detailed information: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html + * https://www.FreeRTOS.org/FAQHelp.html */ + configASSERT( ucCurrentPriority >= ucMaxSysCallPriority ); + } + + /* Priority grouping: The interrupt controller (NVIC) allows the bits + * that define each interrupt's priority to be split between bits that + * define the interrupt's pre-emption priority bits and bits that define + * the interrupt's sub-priority. For simplicity all bits must be defined + * to be pre-emption priority bits. The following assertion will fail if + * this is not the case (if some bits represent a sub-priority). + * + * If the application only uses CMSIS libraries for interrupt + * configuration then the correct setting can be achieved on all Cortex-M + * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the + * scheduler. Note however that some vendor specific peripheral libraries + * assume a non-zero priority group setting, in which cases using a value + * of zero will result in unpredictable behaviour. */ + configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue ); + } + +#endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h index 7aba706018e..3b51cb5ff46 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacro.h @@ -35,8 +35,6 @@ #endif /* *INDENT-ON* */ -#include "portmacrocommon.h" - /*------------------------------------------------------------------------------ * Port specific definitions. * @@ -56,9 +54,14 @@ * Architecture specifics. */ #define portARCH_NAME "Cortex-M85" +#define portHAS_BASEPRI 1 #define portDONT_DISCARD __root /*-----------------------------------------------------------*/ +/* ARMv8-M common port configurations. */ +#include "portmacrocommon.h" +/*-----------------------------------------------------------*/ + #if( configTOTAL_MPU_REGIONS == 16 ) #error 16 MPU regions are not yet supported for this port. 
#endif diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h index a6b5b984879..c2ca5fa7730 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -27,7 +27,7 @@ */ #ifndef PORTMACROCOMMON_H - #define PORTMACROCOMMON_H +#define PORTMACROCOMMON_H /* *INDENT-OFF* */ #ifdef __cplusplus @@ -45,114 +45,114 @@ *------------------------------------------------------------------------------ */ - #ifndef configENABLE_FPU - #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. - #endif /* configENABLE_FPU */ +#ifndef configENABLE_FPU + #error configENABLE_FPU must be defined in FreeRTOSConfig.h. Set configENABLE_FPU to 1 to enable the FPU or 0 to disable the FPU. +#endif /* configENABLE_FPU */ - #ifndef configENABLE_MPU - #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. - #endif /* configENABLE_MPU */ +#ifndef configENABLE_MPU + #error configENABLE_MPU must be defined in FreeRTOSConfig.h. Set configENABLE_MPU to 1 to enable the MPU or 0 to disable the MPU. +#endif /* configENABLE_MPU */ - #ifndef configENABLE_TRUSTZONE - #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. - #endif /* configENABLE_TRUSTZONE */ +#ifndef configENABLE_TRUSTZONE + #error configENABLE_TRUSTZONE must be defined in FreeRTOSConfig.h. Set configENABLE_TRUSTZONE to 1 to enable TrustZone or 0 to disable TrustZone. +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ /** * @brief Type definitions. */ - #define portCHAR char - #define portFLOAT float - #define portDOUBLE double - #define portLONG long - #define portSHORT short - #define portSTACK_TYPE uint32_t - #define portBASE_TYPE long - - typedef portSTACK_TYPE StackType_t; - typedef long BaseType_t; - typedef unsigned long UBaseType_t; - - #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) - typedef uint16_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffff - #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) - typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) 0xffffffffUL +#define portCHAR char +#define portFLOAT float +#define portDOUBLE double +#define portLONG long +#define portSHORT short +#define portSTACK_TYPE uint32_t +#define portBASE_TYPE long + +typedef portSTACK_TYPE StackType_t; +typedef long BaseType_t; +typedef unsigned long UBaseType_t; + +#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS ) + typedef uint16_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffff +#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) + typedef uint32_t TickType_t; + #define portMAX_DELAY ( TickType_t ) 0xffffffffUL /* 32-bit tick type on a 32-bit architecture, so reads of the tick count do * not need to be guarded with a critical section. */ - #define portTICK_TYPE_IS_ATOMIC 1 - #else - #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. - #endif + #define portTICK_TYPE_IS_ATOMIC 1 +#else + #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. +#endif /*-----------------------------------------------------------*/ /** * Architecture specifics. 
*/ - #define portSTACK_GROWTH ( -1 ) - #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) - #define portBYTE_ALIGNMENT 8 - #define portNOP() - #define portINLINE __inline - #ifndef portFORCE_INLINE - #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) - #endif - #define portHAS_STACK_OVERFLOW_CHECKING 1 +#define portSTACK_GROWTH ( -1 ) +#define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) +#define portBYTE_ALIGNMENT 8 +#define portNOP() +#define portINLINE __inline +#ifndef portFORCE_INLINE + #define portFORCE_INLINE inline __attribute__( ( always_inline ) ) +#endif +#define portHAS_STACK_OVERFLOW_CHECKING 1 /*-----------------------------------------------------------*/ /** * @brief Extern declarations. */ - extern BaseType_t xPortIsInsideInterrupt( void ); +extern BaseType_t xPortIsInsideInterrupt( void ); - extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortYield( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; - extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */; +extern void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */; - extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern uint32_t ulSetInterruptMask( void ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; +extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) PRIVILEGED_FUNCTION */; - #if ( configENABLE_TRUSTZONE == 1 ) - extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ - extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; - #endif /* configENABLE_TRUSTZONE */ +#if ( configENABLE_TRUSTZONE == 1 ) + extern void vPortAllocateSecureContext( uint32_t ulSecureStackSize ); /* __attribute__ (( naked )) */ + extern void vPortFreeSecureContext( uint32_t * pulTCB ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */; +#endif /* configENABLE_TRUSTZONE */ - #if ( configENABLE_MPU == 1 ) - extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; - extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + extern BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */; + extern void vResetPrivilege( void ) /* __attribute__ (( naked )) */; +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief MPU specific constants. */ - #if ( configENABLE_MPU == 1 ) - #define portUSING_MPU_WRAPPERS 1 - #define portPRIVILEGE_BIT ( 0x80000000UL ) - #else - #define portPRIVILEGE_BIT ( 0x0UL ) - #endif /* configENABLE_MPU */ +#if ( configENABLE_MPU == 1 ) + #define portUSING_MPU_WRAPPERS 1 + #define portPRIVILEGE_BIT ( 0x80000000UL ) +#else + #define portPRIVILEGE_BIT ( 0x0UL ) +#endif /* configENABLE_MPU */ /* MPU settings that can be overriden in FreeRTOSConfig.h. */ #ifndef configTOTAL_MPU_REGIONS /* Define to 8 for backward compatibility. */ - #define configTOTAL_MPU_REGIONS ( 8UL ) + #define configTOTAL_MPU_REGIONS ( 8UL ) #endif /* MPU regions. 
*/ - #define portPRIVILEGED_FLASH_REGION ( 0UL ) - #define portUNPRIVILEGED_FLASH_REGION ( 1UL ) - #define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) - #define portPRIVILEGED_RAM_REGION ( 3UL ) - #define portSTACK_REGION ( 4UL ) - #define portFIRST_CONFIGURABLE_REGION ( 5UL ) - #define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) - #define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) - #define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ +#define portPRIVILEGED_FLASH_REGION ( 0UL ) +#define portUNPRIVILEGED_FLASH_REGION ( 1UL ) +#define portUNPRIVILEGED_SYSCALLS_REGION ( 2UL ) +#define portPRIVILEGED_RAM_REGION ( 3UL ) +#define portSTACK_REGION ( 4UL ) +#define portFIRST_CONFIGURABLE_REGION ( 5UL ) +#define portLAST_CONFIGURABLE_REGION ( configTOTAL_MPU_REGIONS - 1UL ) +#define portNUM_CONFIGURABLE_REGIONS ( ( portLAST_CONFIGURABLE_REGION - portFIRST_CONFIGURABLE_REGION ) + 1 ) +#define portTOTAL_NUM_REGIONS ( portNUM_CONFIGURABLE_REGIONS + 1 ) /* Plus one to make space for the stack region. */ /* Device memory attributes used in MPU_MAIR registers. * @@ -164,92 +164,105 @@ * 11 --> Device-GRE * Bit[1:0] - 00, Reserved. */ - #define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ - #define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ - #define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ - #define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ +#define portMPU_DEVICE_MEMORY_nGnRnE ( 0x00 ) /* 0000 0000 */ +#define portMPU_DEVICE_MEMORY_nGnRE ( 0x04 ) /* 0000 0100 */ +#define portMPU_DEVICE_MEMORY_nGRE ( 0x08 ) /* 0000 1000 */ +#define portMPU_DEVICE_MEMORY_GRE ( 0x0C ) /* 0000 1100 */ /* Normal memory attributes used in MPU_MAIR registers. */ - #define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ - #define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ +#define portMPU_NORMAL_MEMORY_NON_CACHEABLE ( 0x44 ) /* Non-cacheable. */ +#define portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE ( 0xFF ) /* Non-Transient, Write-back, Read-Allocate and Write-Allocate. */ /* Attributes used in MPU_RBAR registers. */ - #define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) - #define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) - #define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) +#define portMPU_REGION_NON_SHAREABLE ( 0UL << 3UL ) +#define portMPU_REGION_INNER_SHAREABLE ( 1UL << 3UL ) +#define portMPU_REGION_OUTER_SHAREABLE ( 2UL << 3UL ) - #define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) - #define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) - #define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) - #define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_WRITE ( 0UL << 1UL ) +#define portMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define portMPU_REGION_PRIVILEGED_READ_ONLY ( 2UL << 1UL ) +#define portMPU_REGION_READ_ONLY ( 3UL << 1UL ) - #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) +#define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ /** * @brief Settings to define an MPU region. */ - typedef struct MPURegionSettings - { - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ - } MPURegionSettings_t; +typedef struct MPURegionSettings +{ + uint32_t ulRBAR; /**< RBAR for the region. 
*/ + uint32_t ulRLAR; /**< RLAR for the region. */ +} MPURegionSettings_t; /** * @brief MPU settings as stored in the TCB. */ - typedef struct MPU_SETTINGS - { - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ - } xMPU_SETTINGS; +typedef struct MPU_SETTINGS +{ + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ +} xMPU_SETTINGS; /*-----------------------------------------------------------*/ +/** + * @brief Validate priority of ISRs that are allowed to call FreeRTOS + * system calls. + */ +#ifdef configASSERT + #if ( portHAS_BASEPRI == 1 ) + void vPortValidateInterruptPriority( void ); + #define portASSERT_IF_INTERRUPT_PRIORITY_INVALID() vPortValidateInterruptPriority() + #endif +#endif + /** * @brief SVC numbers. */ - #define portSVC_ALLOCATE_SECURE_CONTEXT 0 - #define portSVC_FREE_SECURE_CONTEXT 1 - #define portSVC_START_SCHEDULER 2 - #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_ALLOCATE_SECURE_CONTEXT 0 +#define portSVC_FREE_SECURE_CONTEXT 1 +#define portSVC_START_SCHEDULER 2 +#define portSVC_RAISE_PRIVILEGE 3 /*-----------------------------------------------------------*/ /** * @brief Scheduler utilities. */ - #define portYIELD() vPortYield() - #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) - #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) - #define portEND_SWITCHING_ISR( xSwitchRequired ) do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } while( 0 ) - #define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) +#define portYIELD() vPortYield() +#define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) ) +#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL ) +#define portEND_SWITCHING_ISR( xSwitchRequired ) \ + do { if( xSwitchRequired ) portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT; } \ + while( 0 ) +#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x ) /*-----------------------------------------------------------*/ /** * @brief Critical section management. */ - #define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() - #define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) - #define portENTER_CRITICAL() vPortEnterCritical() - #define portEXIT_CRITICAL() vPortExitCritical() +#define portSET_INTERRUPT_MASK_FROM_ISR() ulSetInterruptMask() +#define portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) vClearInterruptMask( x ) +#define portENTER_CRITICAL() vPortEnterCritical() +#define portEXIT_CRITICAL() vPortExitCritical() /*-----------------------------------------------------------*/ /** * @brief Tickless idle/low power functionality. */ - #ifndef portSUPPRESS_TICKS_AND_SLEEP - extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); - #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) - #endif +#ifndef portSUPPRESS_TICKS_AND_SLEEP + extern void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime ); + #define portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ) vPortSuppressTicksAndSleep( xExpectedIdleTime ) +#endif /*-----------------------------------------------------------*/ /** * @brief Task function macros as described on the FreeRTOS.org WEB site. 
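The scheduler and critical-section macros above are normally exercised indirectly; a minimal sketch of an interrupt handler that defers a context switch through portYIELD_FROM_ISR(), using a hypothetical handler name and queue handle:

```
#include "FreeRTOS.h"
#include "queue.h"

/* Hypothetical queue created elsewhere by the application. */
extern QueueHandle_t xEventQueue;

void vExampleIRQHandler( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;
    const uint32_t ulEvent = 1UL;

    /* The FromISR API reports whether a higher priority task was unblocked. */
    ( void ) xQueueSendFromISR( xEventQueue, &ulEvent, &xHigherPriorityTaskWoken );

    /* Expands to portEND_SWITCHING_ISR(), which pends PendSV through
     * portNVIC_INT_CTRL_REG only when a switch is actually required. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
```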
*/ - #define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) - #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void * pvParameters ) +#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void * pvParameters ) /*-----------------------------------------------------------*/ - #if ( configENABLE_TRUSTZONE == 1 ) +#if ( configENABLE_TRUSTZONE == 1 ) /** * @brief Allocate a secure context for the task. @@ -260,7 +273,7 @@ * * @param[in] ulSecureStackSize The size of the secure stack to be allocated. */ - #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) + #define portALLOCATE_SECURE_CONTEXT( ulSecureStackSize ) vPortAllocateSecureContext( ulSecureStackSize ) /** * @brief Called when a task is deleted to delete the task's secure context, @@ -268,18 +281,18 @@ * * @param[in] pxTCB The TCB of the task being deleted. */ - #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) - #endif /* configENABLE_TRUSTZONE */ + #define portCLEAN_UP_TCB( pxTCB ) vPortFreeSecureContext( ( uint32_t * ) pxTCB ) +#endif /* configENABLE_TRUSTZONE */ /*-----------------------------------------------------------*/ - #if ( configENABLE_MPU == 1 ) +#if ( configENABLE_MPU == 1 ) /** * @brief Checks whether or not the processor is privileged. * * @return 1 if the processor is already privileged, 0 otherwise. */ - #define portIS_PRIVILEGED() xIsPrivileged() + #define portIS_PRIVILEGED() xIsPrivileged() /** * @brief Raise an SVC request to raise privilege. @@ -288,24 +301,24 @@ * then it raises the privilege. If this is called from any other place, * the privilege is not raised. */ - #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); + #define portRAISE_PRIVILEGE() __asm volatile ( "svc %0 \n" ::"i" ( portSVC_RAISE_PRIVILEGE ) : "memory" ); /** * @brief Lowers the privilege level by setting the bit 0 of the CONTROL * register. */ - #define portRESET_PRIVILEGE() vResetPrivilege() - #else - #define portIS_PRIVILEGED() - #define portRAISE_PRIVILEGE() - #define portRESET_PRIVILEGE() - #endif /* configENABLE_MPU */ + #define portRESET_PRIVILEGE() vResetPrivilege() +#else + #define portIS_PRIVILEGED() + #define portRAISE_PRIVILEGE() + #define portRESET_PRIVILEGE() +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ /** * @brief Barriers. */ - #define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) +#define portMEMORY_BARRIER() __asm volatile ( "" ::: "memory" ) /*-----------------------------------------------------------*/ /* *INDENT-OFF* */ From 97434a4e0cfcf8dbd5cceeaf68c3f37121b03996 Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Mon, 15 May 2023 23:29:04 +0530 Subject: [PATCH 41/62] Use the extended movx instruction instead of mov (#676) The following is from the MSP430X instruction set - ``` MOVX.W Move source word to destination word. The source operand is copied to the destination. The source operand is not affected. Both operands may be located in the full address space. ``` The movx instruction allows both the operands to be located in the full address space and therefore, works with large data model as well. 
Signed-off-by: Gaurav Aggarwal --- portable/CCS/MSP430X/portext.asm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/portable/CCS/MSP430X/portext.asm b/portable/CCS/MSP430X/portext.asm index 9ebfa995016..9fe306e9401 100644 --- a/portable/CCS/MSP430X/portext.asm +++ b/portable/CCS/MSP430X/portext.asm @@ -48,7 +48,7 @@ portSAVE_CONTEXT .macro ;Save the remaining registers. pushm_x #12, r15 - mov.w &usCriticalNesting, r14 + movx.w &usCriticalNesting, r14 push_x r14 mov_x &pxCurrentTCB, r12 mov_x sp, 0( r12 ) @@ -60,7 +60,7 @@ portRESTORE_CONTEXT .macro mov_x &pxCurrentTCB, r12 mov_x @r12, sp pop_x r15 - mov.w r15, &usCriticalNesting + movx.w r15, &usCriticalNesting popm_x #12, r15 nop pop.w sr From 953c1ee6cbf460092826bab772e4119c6d9e51e8 Mon Sep 17 00:00:00 2001 From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> Date: Thu, 25 May 2023 16:33:10 +0530 Subject: [PATCH 42/62] Fix eTaskGetState for pending ready tasks (#679) This commit fixes eTaskGetState so that eReady is returned for pending ready tasks. Co-authored-by: Darian Leung --- tasks.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tasks.c b/tasks.c index 59f96226bfc..97539d7b6cd 100644 --- a/tasks.c +++ b/tasks.c @@ -1351,6 +1351,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { eTaskState eReturn; List_t const * pxStateList; + List_t const * pxEventList; List_t const * pxDelayedList; List_t const * pxOverflowedDelayedList; const TCB_t * const pxTCB = xTask; @@ -1367,12 +1368,20 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) taskENTER_CRITICAL(); { pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) ); + pxEventList = listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ); pxDelayedList = pxDelayedTaskList; pxOverflowedDelayedList = pxOverflowDelayedTaskList; } taskEXIT_CRITICAL(); - if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) ) + if( pxEventList == &xPendingReadyList ) + { + /* The task has been placed on the pending ready list, so its + * state is eReady regardless of what list the task's state list + * item is currently placed on. */ + eReturn = eReady; + } + else if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) ) { /* The task being queried is referenced from one of the Blocked * lists. 
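A small usage sketch of the behaviour this change guarantees, assuming INCLUDE_eTaskGetState is 1 and a hypothetical task handle:

```
#include "FreeRTOS.h"
#include "task.h"

void vReportWorkerState( TaskHandle_t xWorkerTaskHandle )
{
    eTaskState eState = eTaskGetState( xWorkerTaskHandle );

    if( eState == eReady )
    {
        /* Ready to run - including the case where the task was readied from
         * an ISR and is still on the pending ready list. */
    }
    else if( eState == eBlocked )
    {
        /* Still waiting for an event or a timeout. */
    }
}
```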
*/ From d43680169b14ae049a1ef5ab19e97768ac31cf7a Mon Sep 17 00:00:00 2001 From: Tony Josi Date: Wed, 31 May 2023 10:04:18 +0000 Subject: [PATCH 43/62] Generates SBOM after source files are updated with release tag (#680) * update source file with release version info before SBOM generation * delete tag branch during cleanup --- .github/workflows/auto-release.yml | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml index 3bf820a6f9c..426d8f0c01a 100644 --- a/.github/workflows/auto-release.yml +++ b/.github/workflows/auto-release.yml @@ -25,7 +25,6 @@ jobs: - name: Tool Setup uses: actions/setup-python@v2 with: - python-version: 3.7.10 architecture: x64 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -51,7 +50,18 @@ jobs: - name: create a new branch that references commit id working-directory: ./local_kernel - run: git checkout -b ${{ github.event.inputs.version_number }} ${{ github.event.inputs.commit_id }} + run: | + git checkout -b ${{ github.event.inputs.version_number }} ${{ github.event.inputs.commit_id }} + echo "COMMIT_SHA_1=$(git rev-parse HEAD)" >> $GITHUB_ENV + + - name: Update source files with version info + run: | + # Install deps and run + pip install -r ./tools/.github/scripts/release-requirements.txt + ./tools/.github/scripts/update_src_version.py FreeRTOS --kernel-repo-path=local_kernel --kernel-commit=${{ env.COMMIT_SHA_1 }} --new-kernel-version=${{ github.event.inputs.version_number }} --new-kernel-main-br-version=${{ github.event.inputs.main_br_version }} + exit $? + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Generate SBOM uses: FreeRTOS/CI-CD-Github-Actions/sbom-generator@main @@ -65,13 +75,19 @@ jobs: git add . git commit -m 'Update SBOM' git push -u origin ${{ github.event.inputs.version_number }} - echo "COMMIT_SHA=$(git rev-parse HEAD)" >> $GITHUB_ENV + echo "COMMIT_SHA_2=$(git rev-parse HEAD)" >> $GITHUB_ENV - name: Release run: | # Install deps and run pip install -r ./tools/.github/scripts/release-requirements.txt - ./tools/.github/scripts/release.py FreeRTOS --kernel-repo-path=local_kernel --kernel-commit=${{ env.COMMIT_SHA }} --new-kernel-version=${{ github.event.inputs.version_number }} --new-kernel-main-br-version=${{ github.event.inputs.main_br_version }} + ./tools/.github/scripts/release.py FreeRTOS --kernel-repo-path=local_kernel --kernel-commit=${{ env.COMMIT_SHA_2 }} --new-kernel-version=${{ github.event.inputs.version_number }} --new-kernel-main-br-version=${{ github.event.inputs.main_br_version }} exit $? 
env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Cleanup + working-directory: ./local_kernel + run: | + # Delete the branch created for Tag by SBOM generator + git push -u origin --delete ${{ github.event.inputs.version_number }} From d3c289fe5b48b6c4a92187c05d29d1a25175f5ab Mon Sep 17 00:00:00 2001 From: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com> Date: Fri, 2 Jun 2023 11:03:40 -0700 Subject: [PATCH 44/62] Add back croutines by reverting PR#590 (#685) * Add croutines to the code base * Add croutine changes to cmake, lexicon and readme * Add croutine file to portable cmake file * Add back more references from PR 591 --- .github/lexicon.txt | 45 +- CMakeLists.txt | 1 + README.md | 3 +- croutine.c | 363 +++++++++ include/FreeRTOS.h | 11 +- include/croutine.h | 753 +++++++++++++++++++ include/queue.h | 22 + portable/ThirdParty/GCC/RP2040/library.cmake | 1 + queue.c | 291 +++++++ tasks.c | 20 +- timers.c | 2 +- 11 files changed, 1497 insertions(+), 15 deletions(-) create mode 100644 croutine.c create mode 100644 include/croutine.h diff --git a/.github/lexicon.txt b/.github/lexicon.txt index a4c02373eb8..1a7d4852dc2 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -1,3 +1,4 @@ + GNU nano 5.9 .github/lexicon.txt Modified aa aaaa aarch @@ -317,6 +318,7 @@ coproc coprocessor coprocessors coreid +coroutinehandle covfs cp cpacr @@ -338,15 +340,32 @@ cprivilegedonlyaccessarray cpsid cpsie cpsr +cpsr +cpstored cpstored cpu +cpu cr +cr +crc crc crcb +crcoroutine +crcoroutine +crdelay +crdelay creadonlyarray creadwritearray createevent +crend +crend crgint +croutine +croutine +crqueue +crqueue +crstart +crstart crt crtv crxedchar @@ -1523,11 +1542,17 @@ prvaddcurrenttasktodelayedlist prvcheckinterfaces prvchecktaskswaitingtermination prvcopydatatoqueue +prvcoroutineflashtask +prvcoroutineflashtask +prvcoroutineflashworktask +prvcoroutineflashworktask prvdeletetcb prvexitfunction prvgettimens prvheapinit prvidletask +prvinitialisecoroutinelists +prvinitialisecoroutinelists prvinitialisemutex prvinitialisenewstreambuffer prvinitialisenewtimer @@ -1642,11 +1667,18 @@ pxblocktoinsert pxcallbackfunction pxcode pxcontainer +pxcoroutinecode +pxcoroutinecode +pxcoroutinewoken +pxcoroutinewoken pxcrcb pxcreatedtask +pxcurrentcoroutine +pxcurrentcoroutine pxcurrenttcb pxcurrenttcbconst pxcurrenttimerlist +pxdelayedcoroutinelist pxdelayedtasklist pxend pxendofstack @@ -1681,6 +1713,7 @@ pxnextfreeblock pxnexttcb pxoriginalsp pxoriginaltos +pxoverflowdelayedcoroutinelist pxoverflowdelayedtasklist pxowner pxportinitialisestack @@ -1690,6 +1723,7 @@ pxqueue pxqueuebuffer pxqueuesetcontainer pxramstack +pxreadycoroutinelists pxreadytaskslists pxreceivecompletedcallback pxregions @@ -2463,6 +2497,7 @@ uxtopreadypriority uxtopusedpriority uxvariabletoincrement uxwantedbytes +vacoroutine vadifferenttask vafunction val @@ -2492,6 +2527,7 @@ vbr vbufferisr vcallbackfunction vclearinterruptmask +vcoroutineschedule vddcore vec vectactive @@ -2501,6 +2537,7 @@ ver veventgroupclearbitscallback veventgroupdelete veventgroupsetbitscallback +vflashcoroutine vfp vfunction vic @@ -2563,12 +2600,14 @@ vqueuedelete vqueueunregisterqueue vr vraiseprivilege +vreceivingcoroutine vreg vresetprivilege vrestorecontextoffirsttask vrpm vsemaphorecreatebinary vsemaphoredelete +vsendingcoroutine vsetbacklightstate vsoftwareinterruptentry vstartfirsttask @@ -2702,6 +2741,9 @@ xcommandtime xcommsrxqueue xconsttickcount xcopyposition +xcoroutinecreate +xcoroutinepreviouslywoken +xcoroutinequeue 
xcount xcreatedeventgroup xcrwokenbypost @@ -2711,6 +2753,7 @@ xdd xdddd xdeadbeef xdelay +xdelayedcoroutinelist xdelayedtasklist xdelaytime xe @@ -2859,6 +2902,7 @@ xpar xparameters xpendedcounts xpendedticks +xpendingreadycoroutinelist xpendingreadylist xperiod xportgetcoreid @@ -3100,4 +3144,3 @@ xwritevalue xxr xyieldpending xzr - diff --git a/CMakeLists.txt b/CMakeLists.txt index d45de64b1b5..46a9e180796 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -267,6 +267,7 @@ add_compile_options( add_subdirectory(portable) add_library(freertos_kernel STATIC + croutine.c event_groups.c list.c queue.c diff --git a/README.md b/README.md index 952914daf46..dd79eee6ce7 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,8 @@ git clone git@github.com:FreeRTOS/FreeRTOS-Kernel.git ## Repository structure - The root of this repository contains the three files that are common to every port - list.c, queue.c and tasks.c. The kernel is contained within these -three files. +three files. croutine.c implements the optional co-routine functionality - which +is normally only used on very memory limited systems. - The ```./portable``` directory contains the files that are specific to a particular microcontroller and/or compiler. See the readme file in the ```./portable``` directory for more information. diff --git a/croutine.c b/croutine.c new file mode 100644 index 00000000000..02b99ded3f2 --- /dev/null +++ b/croutine.c @@ -0,0 +1,363 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#include "FreeRTOS.h" +#include "task.h" +#include "croutine.h" + +/* Remove the whole file is co-routines are not being used. */ +#if ( configUSE_CO_ROUTINES != 0 ) + +/* + * Some kernel aware debuggers require data to be viewed to be global, rather + * than file scope. + */ + #ifdef portREMOVE_STATIC_QUALIFIER + #define static + #endif + + +/* Lists for ready and blocked co-routines. --------------------*/ + static List_t pxReadyCoRoutineLists[ configMAX_CO_ROUTINE_PRIORITIES ]; /*< Prioritised ready co-routines. */ + static List_t xDelayedCoRoutineList1; /*< Delayed co-routines. */ + static List_t xDelayedCoRoutineList2; /*< Delayed co-routines (two lists are used - one for delays that have overflowed the current tick count. 
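A short worked example of why the second delayed list is needed, assuming a 16-bit TickType_t purely to keep the numbers small:

```
/* xCoRoutineTickCount = 0xFFF0, xTicksToDelay = 0x0020
 * xTimeToWake = 0xFFF0 + 0x0020 = 0x0010            (the addition wraps)
 *
 * Because xTimeToWake is now below xCoRoutineTickCount, the co-routine is
 * placed on the overflow list.  The two lists are swapped when the tick
 * count itself wraps to zero, so the entry is only examined once the wrap
 * has actually occurred. */
```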
*/ + static List_t * pxDelayedCoRoutineList = NULL; /*< Points to the delayed co-routine list currently being used. */ + static List_t * pxOverflowDelayedCoRoutineList = NULL; /*< Points to the delayed co-routine list currently being used to hold co-routines that have overflowed the current tick count. */ + static List_t xPendingReadyCoRoutineList; /*< Holds co-routines that have been readied by an external event. They cannot be added directly to the ready lists as the ready lists cannot be accessed by interrupts. */ + +/* Other file private variables. --------------------------------*/ + CRCB_t * pxCurrentCoRoutine = NULL; + static UBaseType_t uxTopCoRoutineReadyPriority = 0; + static TickType_t xCoRoutineTickCount = 0, xLastTickCount = 0, xPassedTicks = 0; + +/* The initial state of the co-routine when it is created. */ + #define corINITIAL_STATE ( 0 ) + +/* + * Place the co-routine represented by pxCRCB into the appropriate ready queue + * for the priority. It is inserted at the end of the list. + * + * This macro accesses the co-routine ready lists and therefore must not be + * used from within an ISR. + */ + #define prvAddCoRoutineToReadyQueue( pxCRCB ) \ + { \ + if( ( pxCRCB )->uxPriority > uxTopCoRoutineReadyPriority ) \ + { \ + uxTopCoRoutineReadyPriority = ( pxCRCB )->uxPriority; \ + } \ + vListInsertEnd( ( List_t * ) &( pxReadyCoRoutineLists[ ( pxCRCB )->uxPriority ] ), &( ( pxCRCB )->xGenericListItem ) ); \ + } + +/* + * Utility to ready all the lists used by the scheduler. This is called + * automatically upon the creation of the first co-routine. + */ + static void prvInitialiseCoRoutineLists( void ); + +/* + * Co-routines that are readied by an interrupt cannot be placed directly into + * the ready lists (there is no mutual exclusion). Instead they are placed in + * in the pending ready list in order that they can later be moved to the ready + * list by the co-routine scheduler. + */ + static void prvCheckPendingReadyList( void ); + +/* + * Macro that looks at the list of co-routines that are currently delayed to + * see if any require waking. + * + * Co-routines are stored in the queue in the order of their wake time - + * meaning once one co-routine has been found whose timer has not expired + * we need not look any further down the list. + */ + static void prvCheckDelayedList( void ); + +/*-----------------------------------------------------------*/ + + BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, + UBaseType_t uxPriority, + UBaseType_t uxIndex ) + { + BaseType_t xReturn; + CRCB_t * pxCoRoutine; + + /* Allocate the memory that will store the co-routine control block. */ + pxCoRoutine = ( CRCB_t * ) pvPortMalloc( sizeof( CRCB_t ) ); + + if( pxCoRoutine ) + { + /* If pxCurrentCoRoutine is NULL then this is the first co-routine to + * be created and the co-routine data structures need initialising. */ + if( pxCurrentCoRoutine == NULL ) + { + pxCurrentCoRoutine = pxCoRoutine; + prvInitialiseCoRoutineLists(); + } + + /* Check the priority is within limits. */ + if( uxPriority >= configMAX_CO_ROUTINE_PRIORITIES ) + { + uxPriority = configMAX_CO_ROUTINE_PRIORITIES - 1; + } + + /* Fill out the co-routine control block from the function parameters. */ + pxCoRoutine->uxState = corINITIAL_STATE; + pxCoRoutine->uxPriority = uxPriority; + pxCoRoutine->uxIndex = uxIndex; + pxCoRoutine->pxCoRoutineFunction = pxCoRoutineCode; + + /* Initialise all the other co-routine control block parameters. 
*/ + vListInitialiseItem( &( pxCoRoutine->xGenericListItem ) ); + vListInitialiseItem( &( pxCoRoutine->xEventListItem ) ); + + /* Set the co-routine control block as a link back from the ListItem_t. + * This is so we can get back to the containing CRCB from a generic item + * in a list. */ + listSET_LIST_ITEM_OWNER( &( pxCoRoutine->xGenericListItem ), pxCoRoutine ); + listSET_LIST_ITEM_OWNER( &( pxCoRoutine->xEventListItem ), pxCoRoutine ); + + /* Event lists are always in priority order. */ + listSET_LIST_ITEM_VALUE( &( pxCoRoutine->xEventListItem ), ( ( TickType_t ) configMAX_CO_ROUTINE_PRIORITIES - ( TickType_t ) uxPriority ) ); + + /* Now the co-routine has been initialised it can be added to the ready + * list at the correct priority. */ + prvAddCoRoutineToReadyQueue( pxCoRoutine ); + + xReturn = pdPASS; + } + else + { + xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, + List_t * pxEventList ) + { + TickType_t xTimeToWake; + + /* Calculate the time to wake - this may overflow but this is + * not a problem. */ + xTimeToWake = xCoRoutineTickCount + xTicksToDelay; + + /* We must remove ourselves from the ready list before adding + * ourselves to the blocked list as the same list item is used for + * both lists. */ + ( void ) uxListRemove( ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) ); + + /* The list item will be inserted in wake time order. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentCoRoutine->xGenericListItem ), xTimeToWake ); + + if( xTimeToWake < xCoRoutineTickCount ) + { + /* Wake time has overflowed. Place this item in the + * overflow list. */ + vListInsert( ( List_t * ) pxOverflowDelayedCoRoutineList, ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) ); + } + else + { + /* The wake time has not overflowed, so we can use the + * current block list. */ + vListInsert( ( List_t * ) pxDelayedCoRoutineList, ( ListItem_t * ) &( pxCurrentCoRoutine->xGenericListItem ) ); + } + + if( pxEventList ) + { + /* Also add the co-routine to an event list. If this is done then the + * function must be called with interrupts disabled. */ + vListInsert( pxEventList, &( pxCurrentCoRoutine->xEventListItem ) ); + } + } +/*-----------------------------------------------------------*/ + + static void prvCheckPendingReadyList( void ) + { + /* Are there any co-routines waiting to get moved to the ready list? These + * are co-routines that have been readied by an ISR. The ISR cannot access + * the ready lists itself. */ + while( listLIST_IS_EMPTY( &xPendingReadyCoRoutineList ) == pdFALSE ) + { + CRCB_t * pxUnblockedCRCB; + + /* The pending ready list can be accessed by an ISR. */ + portDISABLE_INTERRUPTS(); + { + pxUnblockedCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyCoRoutineList ) ); + ( void ) uxListRemove( &( pxUnblockedCRCB->xEventListItem ) ); + } + portENABLE_INTERRUPTS(); + + ( void ) uxListRemove( &( pxUnblockedCRCB->xGenericListItem ) ); + prvAddCoRoutineToReadyQueue( pxUnblockedCRCB ); + } + } +/*-----------------------------------------------------------*/ + + static void prvCheckDelayedList( void ) + { + CRCB_t * pxCRCB; + + xPassedTicks = xTaskGetTickCount() - xLastTickCount; + + while( xPassedTicks ) + { + xCoRoutineTickCount++; + xPassedTicks--; + + /* If the tick count has overflowed we need to swap the ready lists. 
*/ + if( xCoRoutineTickCount == 0 ) + { + List_t * pxTemp; + + /* Tick count has overflowed so we need to swap the delay lists. If there are + * any items in pxDelayedCoRoutineList here then there is an error! */ + pxTemp = pxDelayedCoRoutineList; + pxDelayedCoRoutineList = pxOverflowDelayedCoRoutineList; + pxOverflowDelayedCoRoutineList = pxTemp; + } + + /* See if this tick has made a timeout expire. */ + while( listLIST_IS_EMPTY( pxDelayedCoRoutineList ) == pdFALSE ) + { + pxCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxDelayedCoRoutineList ); + + if( xCoRoutineTickCount < listGET_LIST_ITEM_VALUE( &( pxCRCB->xGenericListItem ) ) ) + { + /* Timeout not yet expired. */ + break; + } + + portDISABLE_INTERRUPTS(); + { + /* The event could have occurred just before this critical + * section. If this is the case then the generic list item will + * have been moved to the pending ready list and the following + * line is still valid. Also the pvContainer parameter will have + * been set to NULL so the following lines are also valid. */ + ( void ) uxListRemove( &( pxCRCB->xGenericListItem ) ); + + /* Is the co-routine waiting on an event also? */ + if( pxCRCB->xEventListItem.pxContainer ) + { + ( void ) uxListRemove( &( pxCRCB->xEventListItem ) ); + } + } + portENABLE_INTERRUPTS(); + + prvAddCoRoutineToReadyQueue( pxCRCB ); + } + } + + xLastTickCount = xCoRoutineTickCount; + } +/*-----------------------------------------------------------*/ + + void vCoRoutineSchedule( void ) + { + /* Only run a co-routine after prvInitialiseCoRoutineLists() has been + * called. prvInitialiseCoRoutineLists() is called automatically when a + * co-routine is created. */ + if( pxDelayedCoRoutineList != NULL ) + { + /* See if any co-routines readied by events need moving to the ready lists. */ + prvCheckPendingReadyList(); + + /* See if any delayed co-routines have timed out. */ + prvCheckDelayedList(); + + /* Find the highest priority queue that contains ready co-routines. */ + while( listLIST_IS_EMPTY( &( pxReadyCoRoutineLists[ uxTopCoRoutineReadyPriority ] ) ) ) + { + if( uxTopCoRoutineReadyPriority == 0 ) + { + /* No more co-routines to check. */ + return; + } + + --uxTopCoRoutineReadyPriority; + } + + /* listGET_OWNER_OF_NEXT_ENTRY walks through the list, so the co-routines + * of the same priority get an equal share of the processor time. */ + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentCoRoutine, &( pxReadyCoRoutineLists[ uxTopCoRoutineReadyPriority ] ) ); + + /* Call the co-routine. */ + ( pxCurrentCoRoutine->pxCoRoutineFunction )( pxCurrentCoRoutine, pxCurrentCoRoutine->uxIndex ); + } + } +/*-----------------------------------------------------------*/ + + static void prvInitialiseCoRoutineLists( void ) + { + UBaseType_t uxPriority; + + for( uxPriority = 0; uxPriority < configMAX_CO_ROUTINE_PRIORITIES; uxPriority++ ) + { + vListInitialise( ( List_t * ) &( pxReadyCoRoutineLists[ uxPriority ] ) ); + } + + vListInitialise( ( List_t * ) &xDelayedCoRoutineList1 ); + vListInitialise( ( List_t * ) &xDelayedCoRoutineList2 ); + vListInitialise( ( List_t * ) &xPendingReadyCoRoutineList ); + + /* Start with pxDelayedCoRoutineList using list1 and the + * pxOverflowDelayedCoRoutineList using list2. 
*/ + pxDelayedCoRoutineList = &xDelayedCoRoutineList1; + pxOverflowDelayedCoRoutineList = &xDelayedCoRoutineList2; + } +/*-----------------------------------------------------------*/ + + BaseType_t xCoRoutineRemoveFromEventList( const List_t * pxEventList ) + { + CRCB_t * pxUnblockedCRCB; + BaseType_t xReturn; + + /* This function is called from within an interrupt. It can only access + * event lists and the pending ready list. This function assumes that a + * check has already been made to ensure pxEventList is not empty. */ + pxUnblockedCRCB = ( CRCB_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); + ( void ) uxListRemove( &( pxUnblockedCRCB->xEventListItem ) ); + vListInsertEnd( ( List_t * ) &( xPendingReadyCoRoutineList ), &( pxUnblockedCRCB->xEventListItem ) ); + + if( pxUnblockedCRCB->uxPriority >= pxCurrentCoRoutine->uxPriority ) + { + xReturn = pdTRUE; + } + else + { + xReturn = pdFALSE; + } + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES == 0 */ diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index f1ab32c6c1b..44d5efdf269 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -169,6 +169,10 @@ #error Macro configTICK_TYPE_WIDTH_IN_BITS is defined to incorrect value. See the Configuration section of the FreeRTOS API documentation for details. #endif +#ifndef configUSE_CO_ROUTINES + #define configUSE_CO_ROUTINES 0 +#endif + #ifndef INCLUDE_vTaskPrioritySet #define INCLUDE_vTaskPrioritySet 0 #endif @@ -263,8 +267,10 @@ #define INCLUDE_xTaskGetCurrentTaskHandle 1 #endif -#if ( defined( configUSE_CO_ROUTINES ) && configUSE_CO_ROUTINES != 0 ) - #warning Co-routines have been removed from FreeRTOS-Kernel versions released after V10.5.1. You can view previous versions of the FreeRTOS Kernel at github.com/freertos/freertos-kernel/tree/V10.5.1 . +#if configUSE_CO_ROUTINES != 0 + #ifndef configMAX_CO_ROUTINE_PRIORITIES + #error configMAX_CO_ROUTINE_PRIORITIES must be greater than or equal to 1. + #endif #endif #ifndef configUSE_DAEMON_TASK_STARTUP_HOOK @@ -1082,6 +1088,7 @@ #define xTaskParameters TaskParameters_t #define xTaskStatusType TaskStatus_t #define xTimerHandle TimerHandle_t + #define xCoRoutineHandle CoRoutineHandle_t #define pdTASK_HOOK_CODE TaskHookFunction_t #define portTICK_RATE_MS portTICK_PERIOD_MS #define pcTaskGetTaskName pcTaskGetName diff --git a/include/croutine.h b/include/croutine.h new file mode 100644 index 00000000000..8ac4aa521bc --- /dev/null +++ b/include/croutine.h @@ -0,0 +1,753 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef CO_ROUTINE_H +#define CO_ROUTINE_H + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h must appear in source files before include croutine.h" +#endif + +#include "list.h" + +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ + +/* Used to hide the implementation of the co-routine control block. The + * control block structure however has to be included in the header due to + * the macro implementation of the co-routine functionality. */ +typedef void * CoRoutineHandle_t; + +/* Defines the prototype to which co-routine functions must conform. */ +typedef void (* crCOROUTINE_CODE)( CoRoutineHandle_t, + UBaseType_t ); + +typedef struct corCoRoutineControlBlock +{ + crCOROUTINE_CODE pxCoRoutineFunction; + ListItem_t xGenericListItem; /*< List item used to place the CRCB in ready and blocked queues. */ + ListItem_t xEventListItem; /*< List item used to place the CRCB in event lists. */ + UBaseType_t uxPriority; /*< The priority of the co-routine in relation to other co-routines. */ + UBaseType_t uxIndex; /*< Used to distinguish between co-routines when multiple co-routines use the same co-routine function. */ + uint16_t uxState; /*< Used internally by the co-routine implementation. */ +} CRCB_t; /* Co-routine control block. Note must be identical in size down to uxPriority with TCB_t. */ + +/** + * croutine. h + * @code{c} + * BaseType_t xCoRoutineCreate( + * crCOROUTINE_CODE pxCoRoutineCode, + * UBaseType_t uxPriority, + * UBaseType_t uxIndex + * ); + * @endcode + * + * Create a new co-routine and add it to the list of co-routines that are + * ready to run. + * + * @param pxCoRoutineCode Pointer to the co-routine function. Co-routine + * functions require special syntax - see the co-routine section of the WEB + * documentation for more information. + * + * @param uxPriority The priority with respect to other co-routines at which + * the co-routine will run. + * + * @param uxIndex Used to distinguish between different co-routines that + * execute the same function. See the example below and the co-routine section + * of the WEB documentation for further information. + * + * @return pdPASS if the co-routine was successfully created and added to a ready + * list, otherwise an error code defined with ProjDefs.h. + * + * Example usage: + * @code{c} + * // Co-routine to be created. + * void vFlashCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * // Variables in co-routines must be declared static if they must maintain value across a blocking call. + * // This may not be necessary for const variables. + * static const char cLedToFlash[ 2 ] = { 5, 6 }; + * static const TickType_t uxFlashRates[ 2 ] = { 200, 400 }; + * + * // Must start every co-routine with a call to crSTART(); + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // This co-routine just delays for a fixed period, then toggles + * // an LED. Two co-routines are created using this function, so + * // the uxIndex parameter is used to tell the co-routine which + * // LED to flash and how int32_t to delay. This assumes xQueue has + * // already been created. 
+ * vParTestToggleLED( cLedToFlash[ uxIndex ] ); + * crDELAY( xHandle, uxFlashRates[ uxIndex ] ); + * } + * + * // Must end every co-routine with a call to crEND(); + * crEND(); + * } + * + * // Function that creates two co-routines. + * void vOtherFunction( void ) + * { + * uint8_t ucParameterToPass; + * TaskHandle_t xHandle; + * + * // Create two co-routines at priority 0. The first is given index 0 + * // so (from the code above) toggles LED 5 every 200 ticks. The second + * // is given index 1 so toggles LED 6 every 400 ticks. + * for( uxIndex = 0; uxIndex < 2; uxIndex++ ) + * { + * xCoRoutineCreate( vFlashCoRoutine, 0, uxIndex ); + * } + * } + * @endcode + * \defgroup xCoRoutineCreate xCoRoutineCreate + * \ingroup Tasks + */ +BaseType_t xCoRoutineCreate( crCOROUTINE_CODE pxCoRoutineCode, + UBaseType_t uxPriority, + UBaseType_t uxIndex ); + + +/** + * croutine. h + * @code{c} + * void vCoRoutineSchedule( void ); + * @endcode + * + * Run a co-routine. + * + * vCoRoutineSchedule() executes the highest priority co-routine that is able + * to run. The co-routine will execute until it either blocks, yields or is + * preempted by a task. Co-routines execute cooperatively so one + * co-routine cannot be preempted by another, but can be preempted by a task. + * + * If an application comprises of both tasks and co-routines then + * vCoRoutineSchedule should be called from the idle task (in an idle task + * hook). + * + * Example usage: + * @code{c} + * // This idle task hook will schedule a co-routine each time it is called. + * // The rest of the idle task will execute between co-routine calls. + * void vApplicationIdleHook( void ) + * { + * vCoRoutineSchedule(); + * } + * + * // Alternatively, if you do not require any other part of the idle task to + * // execute, the idle task hook can call vCoRoutineSchedule() within an + * // infinite loop. + * void vApplicationIdleHook( void ) + * { + * for( ;; ) + * { + * vCoRoutineSchedule(); + * } + * } + * @endcode + * \defgroup vCoRoutineSchedule vCoRoutineSchedule + * \ingroup Tasks + */ +void vCoRoutineSchedule( void ); + +/** + * croutine. h + * @code{c} + * crSTART( CoRoutineHandle_t xHandle ); + * @endcode + * + * This macro MUST always be called at the start of a co-routine function. + * + * Example usage: + * @code{c} + * // Co-routine to be created. + * void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * // Variables in co-routines must be declared static if they must maintain value across a blocking call. + * static int32_t ulAVariable; + * + * // Must start every co-routine with a call to crSTART(); + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // Co-routine functionality goes here. + * } + * + * // Must end every co-routine with a call to crEND(); + * crEND(); + * } + * @endcode + * \defgroup crSTART crSTART + * \ingroup Tasks + */ +#define crSTART( pxCRCB ) \ + switch( ( ( CRCB_t * ) ( pxCRCB ) )->uxState ) { \ + case 0: + +/** + * croutine. h + * @code{c} + * crEND(); + * @endcode + * + * This macro MUST always be called at the end of a co-routine function. + * + * Example usage: + * @code{c} + * // Co-routine to be created. + * void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * // Variables in co-routines must be declared static if they must maintain value across a blocking call. + * static int32_t ulAVariable; + * + * // Must start every co-routine with a call to crSTART(); + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // Co-routine functionality goes here. 
+ * } + * + * // Must end every co-routine with a call to crEND(); + * crEND(); + * } + * @endcode + * \defgroup crSTART crSTART + * \ingroup Tasks + */ +#define crEND() } + +/* + * These macros are intended for internal use by the co-routine implementation + * only. The macros should not be used directly by application writers. + */ +#define crSET_STATE0( xHandle ) \ + ( ( CRCB_t * ) ( xHandle ) )->uxState = ( __LINE__ * 2 ); return; \ + case ( __LINE__ * 2 ): +#define crSET_STATE1( xHandle ) \ + ( ( CRCB_t * ) ( xHandle ) )->uxState = ( ( __LINE__ * 2 ) + 1 ); return; \ + case ( ( __LINE__ * 2 ) + 1 ): + +/** + * croutine. h + * @code{c} + * crDELAY( CoRoutineHandle_t xHandle, TickType_t xTicksToDelay ); + * @endcode + * + * Delay a co-routine for a fixed period of time. + * + * crDELAY can only be called from the co-routine function itself - not + * from within a function called by the co-routine function. This is because + * co-routines do not maintain their own stack. + * + * @param xHandle The handle of the co-routine to delay. This is the xHandle + * parameter of the co-routine function. + * + * @param xTickToDelay The number of ticks that the co-routine should delay + * for. The actual amount of time this equates to is defined by + * configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant portTICK_PERIOD_MS + * can be used to convert ticks to milliseconds. + * + * Example usage: + * @code{c} + * // Co-routine to be created. + * void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * // Variables in co-routines must be declared static if they must maintain value across a blocking call. + * // This may not be necessary for const variables. + * // We are to delay for 200ms. + * static const xTickType xDelayTime = 200 / portTICK_PERIOD_MS; + * + * // Must start every co-routine with a call to crSTART(); + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // Delay for 200ms. + * crDELAY( xHandle, xDelayTime ); + * + * // Do something here. + * } + * + * // Must end every co-routine with a call to crEND(); + * crEND(); + * } + * @endcode + * \defgroup crDELAY crDELAY + * \ingroup Tasks + */ +#define crDELAY( xHandle, xTicksToDelay ) \ + if( ( xTicksToDelay ) > 0 ) \ + { \ + vCoRoutineAddToDelayedList( ( xTicksToDelay ), NULL ); \ + } \ + crSET_STATE0( ( xHandle ) ); + +/** + * @code{c} + * crQUEUE_SEND( + * CoRoutineHandle_t xHandle, + * QueueHandle_t pxQueue, + * void *pvItemToQueue, + * TickType_t xTicksToWait, + * BaseType_t *pxResult + * ) + * @endcode + * + * The macro's crQUEUE_SEND() and crQUEUE_RECEIVE() are the co-routine + * equivalent to the xQueueSend() and xQueueReceive() functions used by tasks. + * + * crQUEUE_SEND and crQUEUE_RECEIVE can only be used from a co-routine whereas + * xQueueSend() and xQueueReceive() can only be used from tasks. + * + * crQUEUE_SEND can only be called from the co-routine function itself - not + * from within a function called by the co-routine function. This is because + * co-routines do not maintain their own stack. + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xHandle The handle of the calling co-routine. This is the xHandle + * parameter of the co-routine function. + * + * @param pxQueue The handle of the queue on which the data will be posted. + * The handle is obtained as the return value when the queue is created using + * the xQueueCreate() API function. 
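Stepping back from the individual macros for a moment: crSTART(), crDELAY(), crEND() and the queue macros that follow all rely on the crSET_STATE continuation trick above, which is why local variables that must survive a blocking call have to be declared static. A loose sketch of what the preprocessor produces for a co-routine that only delays, with ( __LINE__ * 2 ) shown as a fixed number:

```
/* Approximate expansion only - 84 stands in for whatever ( __LINE__ * 2 )
 * evaluates to at the crDELAY() call site.  Assumes FreeRTOS.h, task.h and
 * croutine.h have been included. */
void vACoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
{
    switch( ( ( CRCB_t * ) xHandle )->uxState )          /* crSTART( xHandle ); */
    {
        case 0:

            for( ; ; )
            {
                /* crDELAY( xHandle, 100 ); */
                vCoRoutineAddToDelayedList( 100, NULL ); /* Move to the delayed list... */
                ( ( CRCB_t * ) xHandle )->uxState = 84;  /* ...record the resume point... */
                return;                                  /* ...and return to vCoRoutineSchedule(). */
        case 84:;                                        /* Execution resumes here next time. */
            }
    }                                                    /* crEND(); */
}
```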
+ * + * @param pvItemToQueue A pointer to the data being posted onto the queue. + * The number of bytes of each queued item is specified when the queue is + * created. This number of bytes is copied from pvItemToQueue into the queue + * itself. + * + * @param xTickToDelay The number of ticks that the co-routine should block + * to wait for space to become available on the queue, should space not be + * available immediately. The actual amount of time this equates to is defined + * by configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant + * portTICK_PERIOD_MS can be used to convert ticks to milliseconds (see example + * below). + * + * @param pxResult The variable pointed to by pxResult will be set to pdPASS if + * data was successfully posted onto the queue, otherwise it will be set to an + * error defined within ProjDefs.h. + * + * Example usage: + * @code{c} + * // Co-routine function that blocks for a fixed period then posts a number onto + * // a queue. + * static void prvCoRoutineFlashTask( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * // Variables in co-routines must be declared static if they must maintain value across a blocking call. + * static BaseType_t xNumberToPost = 0; + * static BaseType_t xResult; + * + * // Co-routines must begin with a call to crSTART(). + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // This assumes the queue has already been created. + * crQUEUE_SEND( xHandle, xCoRoutineQueue, &xNumberToPost, NO_DELAY, &xResult ); + * + * if( xResult != pdPASS ) + * { + * // The message was not posted! + * } + * + * // Increment the number to be posted onto the queue. + * xNumberToPost++; + * + * // Delay for 100 ticks. + * crDELAY( xHandle, 100 ); + * } + * + * // Co-routines must end with a call to crEND(). + * crEND(); + * } + * @endcode + * \defgroup crQUEUE_SEND crQUEUE_SEND + * \ingroup Tasks + */ +#define crQUEUE_SEND( xHandle, pxQueue, pvItemToQueue, xTicksToWait, pxResult ) \ + { \ + *( pxResult ) = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), ( xTicksToWait ) ); \ + if( *( pxResult ) == errQUEUE_BLOCKED ) \ + { \ + crSET_STATE0( ( xHandle ) ); \ + *pxResult = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), 0 ); \ + } \ + if( *pxResult == errQUEUE_YIELD ) \ + { \ + crSET_STATE1( ( xHandle ) ); \ + *pxResult = pdPASS; \ + } \ + } + +/** + * croutine. h + * @code{c} + * crQUEUE_RECEIVE( + * CoRoutineHandle_t xHandle, + * QueueHandle_t pxQueue, + * void *pvBuffer, + * TickType_t xTicksToWait, + * BaseType_t *pxResult + * ) + * @endcode + * + * The macro's crQUEUE_SEND() and crQUEUE_RECEIVE() are the co-routine + * equivalent to the xQueueSend() and xQueueReceive() functions used by tasks. + * + * crQUEUE_SEND and crQUEUE_RECEIVE can only be used from a co-routine whereas + * xQueueSend() and xQueueReceive() can only be used from tasks. + * + * crQUEUE_RECEIVE can only be called from the co-routine function itself - not + * from within a function called by the co-routine function. This is because + * co-routines do not maintain their own stack. + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xHandle The handle of the calling co-routine. This is the xHandle + * parameter of the co-routine function. + * + * @param pxQueue The handle of the queue from which the data will be received. + * The handle is obtained as the return value when the queue is created using + * the xQueueCreate() API function. 
+ * + * @param pvBuffer The buffer into which the received item is to be copied. + * The number of bytes of each queued item is specified when the queue is + * created. This number of bytes is copied into pvBuffer. + * + * @param xTickToDelay The number of ticks that the co-routine should block + * to wait for data to become available from the queue, should data not be + * available immediately. The actual amount of time this equates to is defined + * by configTICK_RATE_HZ (set in FreeRTOSConfig.h). The constant + * portTICK_PERIOD_MS can be used to convert ticks to milliseconds (see the + * crQUEUE_SEND example). + * + * @param pxResult The variable pointed to by pxResult will be set to pdPASS if + * data was successfully retrieved from the queue, otherwise it will be set to + * an error code as defined within ProjDefs.h. + * + * Example usage: + * @code{c} + * // A co-routine receives the number of an LED to flash from a queue. It + * // blocks on the queue until the number is received. + * static void prvCoRoutineFlashWorkTask( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * // Variables in co-routines must be declared static if they must maintain value across a blocking call. + * static BaseType_t xResult; + * static UBaseType_t uxLEDToFlash; + * + * // All co-routines must start with a call to crSTART(). + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // Wait for data to become available on the queue. + * crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &uxLEDToFlash, portMAX_DELAY, &xResult ); + * + * if( xResult == pdPASS ) + * { + * // We received the LED to flash - flash it! + * vParTestToggleLED( uxLEDToFlash ); + * } + * } + * + * crEND(); + * } + * @endcode + * \defgroup crQUEUE_RECEIVE crQUEUE_RECEIVE + * \ingroup Tasks + */ +#define crQUEUE_RECEIVE( xHandle, pxQueue, pvBuffer, xTicksToWait, pxResult ) \ + { \ + *( pxResult ) = xQueueCRReceive( ( pxQueue ), ( pvBuffer ), ( xTicksToWait ) ); \ + if( *( pxResult ) == errQUEUE_BLOCKED ) \ + { \ + crSET_STATE0( ( xHandle ) ); \ + *( pxResult ) = xQueueCRReceive( ( pxQueue ), ( pvBuffer ), 0 ); \ + } \ + if( *( pxResult ) == errQUEUE_YIELD ) \ + { \ + crSET_STATE1( ( xHandle ) ); \ + *( pxResult ) = pdPASS; \ + } \ + } + +/** + * croutine. h + * @code{c} + * crQUEUE_SEND_FROM_ISR( + * QueueHandle_t pxQueue, + * void *pvItemToQueue, + * BaseType_t xCoRoutinePreviouslyWoken + * ) + * @endcode + * + * The macro's crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() are the + * co-routine equivalent to the xQueueSendFromISR() and xQueueReceiveFromISR() + * functions used by tasks. + * + * crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() can only be used to + * pass data between a co-routine and and ISR, whereas xQueueSendFromISR() and + * xQueueReceiveFromISR() can only be used to pass data between a task and and + * ISR. + * + * crQUEUE_SEND_FROM_ISR can only be called from an ISR to send data to a queue + * that is being used from within a co-routine. + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvItemToQueue A pointer to the item that is to be placed on the + * queue. The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from pvItemToQueue + * into the queue storage area. 
+ * + * @param xCoRoutinePreviouslyWoken This is included so an ISR can post onto + * the same queue multiple times from a single interrupt. The first call + * should always pass in pdFALSE. Subsequent calls should pass in + * the value returned from the previous call. + * + * @return pdTRUE if a co-routine was woken by posting onto the queue. This is + * used by the ISR to determine if a context switch may be required following + * the ISR. + * + * Example usage: + * @code{c} + * // A co-routine that blocks on a queue waiting for characters to be received. + * static void vReceivingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * char cRxedChar; + * BaseType_t xResult; + * + * // All co-routines must start with a call to crSTART(). + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // Wait for data to become available on the queue. This assumes the + * // queue xCommsRxQueue has already been created! + * crQUEUE_RECEIVE( xHandle, xCommsRxQueue, &uxLEDToFlash, portMAX_DELAY, &xResult ); + * + * // Was a character received? + * if( xResult == pdPASS ) + * { + * // Process the character here. + * } + * } + * + * // All co-routines must end with a call to crEND(). + * crEND(); + * } + * + * // An ISR that uses a queue to send characters received on a serial port to + * // a co-routine. + * void vUART_ISR( void ) + * { + * char cRxedChar; + * BaseType_t xCRWokenByPost = pdFALSE; + * + * // We loop around reading characters until there are none left in the UART. + * while( UART_RX_REG_NOT_EMPTY() ) + * { + * // Obtain the character from the UART. + * cRxedChar = UART_RX_REG; + * + * // Post the character onto a queue. xCRWokenByPost will be pdFALSE + * // the first time around the loop. If the post causes a co-routine + * // to be woken (unblocked) then xCRWokenByPost will be set to pdTRUE. + * // In this manner we can ensure that if more than one co-routine is + * // blocked on the queue only one is woken by this ISR no matter how + * // many characters are posted to the queue. + * xCRWokenByPost = crQUEUE_SEND_FROM_ISR( xCommsRxQueue, &cRxedChar, xCRWokenByPost ); + * } + * } + * @endcode + * \defgroup crQUEUE_SEND_FROM_ISR crQUEUE_SEND_FROM_ISR + * \ingroup Tasks + */ +#define crQUEUE_SEND_FROM_ISR( pxQueue, pvItemToQueue, xCoRoutinePreviouslyWoken ) \ + xQueueCRSendFromISR( ( pxQueue ), ( pvItemToQueue ), ( xCoRoutinePreviouslyWoken ) ) + + +/** + * croutine. h + * @code{c} + * crQUEUE_SEND_FROM_ISR( + * QueueHandle_t pxQueue, + * void *pvBuffer, + * BaseType_t * pxCoRoutineWoken + * ) + * @endcode + * + * The macro's crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() are the + * co-routine equivalent to the xQueueSendFromISR() and xQueueReceiveFromISR() + * functions used by tasks. + * + * crQUEUE_SEND_FROM_ISR() and crQUEUE_RECEIVE_FROM_ISR() can only be used to + * pass data between a co-routine and and ISR, whereas xQueueSendFromISR() and + * xQueueReceiveFromISR() can only be used to pass data between a task and and + * ISR. + * + * crQUEUE_RECEIVE_FROM_ISR can only be called from an ISR to receive data + * from a queue that is being used from within a co-routine (a co-routine + * posted to the queue). + * + * See the co-routine section of the WEB documentation for information on + * passing data between tasks and co-routines and between ISR's and + * co-routines. + * + * @param xQueue The handle to the queue on which the item is to be posted. + * + * @param pvBuffer A pointer to a buffer into which the received item will be + * placed. 
The size of the items the queue will hold was defined when the + * queue was created, so this many bytes will be copied from the queue into + * pvBuffer. + * + * @param pxCoRoutineWoken A co-routine may be blocked waiting for space to become + * available on the queue. If crQUEUE_RECEIVE_FROM_ISR causes such a + * co-routine to unblock *pxCoRoutineWoken will get set to pdTRUE, otherwise + * *pxCoRoutineWoken will remain unchanged. + * + * @return pdTRUE an item was successfully received from the queue, otherwise + * pdFALSE. + * + * Example usage: + * @code{c} + * // A co-routine that posts a character to a queue then blocks for a fixed + * // period. The character is incremented each time. + * static void vSendingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex ) + * { + * // cChar holds its value while this co-routine is blocked and must therefore + * // be declared static. + * static char cCharToTx = 'a'; + * BaseType_t xResult; + * + * // All co-routines must start with a call to crSTART(). + * crSTART( xHandle ); + * + * for( ;; ) + * { + * // Send the next character to the queue. + * crQUEUE_SEND( xHandle, xCoRoutineQueue, &cCharToTx, NO_DELAY, &xResult ); + * + * if( xResult == pdPASS ) + * { + * // The character was successfully posted to the queue. + * } + * else + * { + * // Could not post the character to the queue. + * } + * + * // Enable the UART Tx interrupt to cause an interrupt in this + * // hypothetical UART. The interrupt will obtain the character + * // from the queue and send it. + * ENABLE_RX_INTERRUPT(); + * + * // Increment to the next character then block for a fixed period. + * // cCharToTx will maintain its value across the delay as it is + * // declared static. + * cCharToTx++; + * if( cCharToTx > 'x' ) + * { + * cCharToTx = 'a'; + * } + * crDELAY( 100 ); + * } + * + * // All co-routines must end with a call to crEND(). + * crEND(); + * } + * + * // An ISR that uses a queue to receive characters to send on a UART. + * void vUART_ISR( void ) + * { + * char cCharToTx; + * BaseType_t xCRWokenByPost = pdFALSE; + * + * while( UART_TX_REG_EMPTY() ) + * { + * // Are there any characters in the queue waiting to be sent? + * // xCRWokenByPost will automatically be set to pdTRUE if a co-routine + * // is woken by the post - ensuring that only a single co-routine is + * // woken no matter how many times we go around this loop. + * if( crQUEUE_RECEIVE_FROM_ISR( pxQueue, &cCharToTx, &xCRWokenByPost ) ) + * { + * SEND_CHARACTER( cCharToTx ); + * } + * } + * } + * @endcode + * \defgroup crQUEUE_RECEIVE_FROM_ISR crQUEUE_RECEIVE_FROM_ISR + * \ingroup Tasks + */ +#define crQUEUE_RECEIVE_FROM_ISR( pxQueue, pvBuffer, pxCoRoutineWoken ) \ + xQueueCRReceiveFromISR( ( pxQueue ), ( pvBuffer ), ( pxCoRoutineWoken ) ) + +/* + * This function is intended for internal use by the co-routine macros only. + * The macro nature of the co-routine implementation requires that the + * prototype appears here. The function should not be used by application + * writers. + * + * Removes the current co-routine from its ready list and places it in the + * appropriate delayed list. + */ +void vCoRoutineAddToDelayedList( TickType_t xTicksToDelay, + List_t * pxEventList ); + +/* + * This function is intended for internal use by the queue implementation only. + * The function should not be used by application writers. + * + * Removes the highest priority co-routine from the event list and places it in + * the pending ready list. 
+ */ +BaseType_t xCoRoutineRemoveFromEventList( const List_t * pxEventList ); + +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + +#endif /* CO_ROUTINE_H */ diff --git a/include/queue.h b/include/queue.h index 66c8286aef0..e5092ac5217 100644 --- a/include/queue.h +++ b/include/queue.h @@ -1455,6 +1455,28 @@ BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FU BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +/* + * The functions defined above are for passing data to and from tasks. The + * functions below are the equivalents for passing data to and from + * co-routines. + * + * These functions are called from the co-routine macro implementation and + * should not be called directly from application code. Instead use the macro + * wrappers defined within croutine.h. + */ +BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, + const void * pvItemToQueue, + BaseType_t xCoRoutinePreviouslyWoken ); +BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, + void * pvBuffer, + BaseType_t * pxTaskWoken ); +BaseType_t xQueueCRSend( QueueHandle_t xQueue, + const void * pvItemToQueue, + TickType_t xTicksToWait ); +BaseType_t xQueueCRReceive( QueueHandle_t xQueue, + void * pvBuffer, + TickType_t xTicksToWait ); + /* * For internal use only. Use xSemaphoreCreateMutex(), * xSemaphoreCreateCounting() or xSemaphoreGetMutexHolder() instead of calling diff --git a/portable/ThirdParty/GCC/RP2040/library.cmake b/portable/ThirdParty/GCC/RP2040/library.cmake index e730e6fb524..902a21766ad 100644 --- a/portable/ThirdParty/GCC/RP2040/library.cmake +++ b/portable/ThirdParty/GCC/RP2040/library.cmake @@ -6,6 +6,7 @@ add_library(FreeRTOS-Kernel-Core INTERFACE) target_sources(FreeRTOS-Kernel-Core INTERFACE + ${FREERTOS_KERNEL_PATH}/croutine.c ${FREERTOS_KERNEL_PATH}/event_groups.c ${FREERTOS_KERNEL_PATH}/list.c ${FREERTOS_KERNEL_PATH}/queue.c diff --git a/queue.c b/queue.c index 662052f5ef6..27776f0e45c 100644 --- a/queue.c +++ b/queue.c @@ -38,6 +38,10 @@ #include "task.h" #include "queue.h" +#if ( configUSE_CO_ROUTINES == 1 ) + #include "croutine.h" +#endif + /* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined * for the header files above, but not in this file, in order to generate the @@ -2523,6 +2527,293 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */ /*-----------------------------------------------------------*/ +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRSend( QueueHandle_t xQueue, + const void * pvItemToQueue, + TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* If the queue is already full we may have to block. A critical section + * is required to prevent an interrupt removing something from the queue + * between the check to see if the queue is full and blocking on the queue. */ + portDISABLE_INTERRUPTS(); + { + if( prvIsQueueFull( pxQueue ) != pdFALSE ) + { + /* The queue is full - do we want to block or just leave without + * posting? */ + if( xTicksToWait > ( TickType_t ) 0 ) + { + /* As this is called from a coroutine we cannot block directly, but + * return indicating that we need to block. 
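+ * The crQUEUE_SEND macro in croutine.h checks for the errQUEUE_BLOCKED
+ * return value, yields the calling co-routine and retries the send when
+ * the co-routine next runs.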
*/ + vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) ); + portENABLE_INTERRUPTS(); + return errQUEUE_BLOCKED; + } + else + { + portENABLE_INTERRUPTS(); + return errQUEUE_FULL; + } + } + } + portENABLE_INTERRUPTS(); + + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) + { + /* There is room in the queue, copy the data into the queue. */ + prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK ); + xReturn = pdPASS; + + /* Were any co-routines waiting for data to become available? */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + /* In this instance the co-routine could be placed directly + * into the ready list as we are within a critical section. + * Instead the same pending ready list mechanism is used as if + * the event were caused from within an interrupt. */ + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + /* The co-routine waiting has a higher priority so record + * that a yield might be appropriate. */ + xReturn = errQUEUE_YIELD; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xReturn = errQUEUE_FULL; + } + } + portENABLE_INTERRUPTS(); + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRReceive( QueueHandle_t xQueue, + void * pvBuffer, + TickType_t xTicksToWait ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* If the queue is already empty we may have to block. A critical section + * is required to prevent an interrupt adding something to the queue + * between the check to see if the queue is empty and blocking on the queue. */ + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 ) + { + /* There are no messages in the queue, do we want to block or just + * leave with nothing? */ + if( xTicksToWait > ( TickType_t ) 0 ) + { + /* As this is a co-routine we cannot block directly, but return + * indicating that we need to block. */ + vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) ); + portENABLE_INTERRUPTS(); + return errQUEUE_BLOCKED; + } + else + { + portENABLE_INTERRUPTS(); + return errQUEUE_FULL; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + portENABLE_INTERRUPTS(); + + portDISABLE_INTERRUPTS(); + { + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Data is available from the queue. */ + pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; + + if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) + { + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + --( pxQueue->uxMessagesWaiting ); + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize ); + + xReturn = pdPASS; + + /* Were any co-routines waiting for space to become available? */ + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + /* In this instance the co-routine could be placed directly + * into the ready list as we are within a critical section. + * Instead the same pending ready list mechanism is used as if + * the event were caused from within an interrupt. 
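+ * Co-routines on the pending ready list are moved to their ready list the
+ * next time vCoRoutineSchedule() runs.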
*/ + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + xReturn = errQUEUE_YIELD; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + xReturn = pdFAIL; + } + } + portENABLE_INTERRUPTS(); + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, + const void * pvItemToQueue, + BaseType_t xCoRoutinePreviouslyWoken ) + { + Queue_t * const pxQueue = xQueue; + + /* Cannot block within an ISR so if there is no space on the queue then + * exit without doing anything. */ + if( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) + { + prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK ); + + /* We only want to wake one co-routine per ISR, so check that a + * co-routine has not already been woken. */ + if( xCoRoutinePreviouslyWoken == pdFALSE ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) + { + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) + { + return pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xCoRoutinePreviouslyWoken; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_CO_ROUTINES == 1 ) + + BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, + void * pvBuffer, + BaseType_t * pxCoRoutineWoken ) + { + BaseType_t xReturn; + Queue_t * const pxQueue = xQueue; + + /* We cannot block from an ISR, so check there is data available. If + * not then just leave without doing anything. */ + if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) + { + /* Copy the data from the queue. */ + pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; + + if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) + { + pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + --( pxQueue->uxMessagesWaiting ); + ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize ); + + if( ( *pxCoRoutineWoken ) == pdFALSE ) + { + if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) + { + if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE ) + { + *pxCoRoutineWoken = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + + return xReturn; + } + +#endif /* configUSE_CO_ROUTINES */ +/*-----------------------------------------------------------*/ + #if ( configQUEUE_REGISTRY_SIZE > 0 ) void vQueueAddToRegistry( QueueHandle_t xQueue, diff --git a/tasks.c b/tasks.c index 97539d7b6cd..f2d6a40c307 100644 --- a/tasks.c +++ b/tasks.c @@ -2211,7 +2211,7 @@ BaseType_t xTaskResumeAll( void ) * appropriate ready list. */ while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE ) { - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. 
*/ + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ listREMOVE_ITEM( &( pxTCB->xEventListItem ) ); portMEMORY_BARRIER(); listREMOVE_ITEM( &( pxTCB->xStateListItem ) ); @@ -2379,11 +2379,11 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) { - listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ do { - listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ /* Check each character in the name looking for a match or * mismatch. */ @@ -2832,7 +2832,7 @@ BaseType_t xTaskIncrementTick( void ) * item at the head of the delayed list. This is the time * at which the task at the head of the delayed list must * be removed from the Blocked state. */ - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); if( xConstTickCount < xItemValue ) @@ -3120,7 +3120,7 @@ void vTaskSwitchContext( void ) /* Select a new task to run using either the generic C or port * optimised asm code. */ - taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ traceTASK_SWITCHED_IN(); /* After the new task is switched in, update the global errno. */ @@ -3245,7 +3245,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) * * This function assumes that a check has already been made to ensure that * pxEventList is not empty. */ - pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. 
Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ configASSERT( pxUnblockedTCB ); listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) ); @@ -3309,7 +3309,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, /* Remove the event list form the event flag. Interrupts do not access * event flags. */ - pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ configASSERT( pxUnblockedTCB ); listREMOVE_ITEM( pxEventListItem ); @@ -3756,7 +3756,7 @@ static void prvCheckTasksWaitingTermination( void ) { taskENTER_CRITICAL(); { - pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); --uxCurrentNumberOfTasks; --uxDeletedTasksWaitingCleanUp; @@ -3885,7 +3885,7 @@ static void prvCheckTasksWaitingTermination( void ) if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) { - listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ /* Populate an TaskStatus_t structure within the * pxTaskStatusArray array for each task that is referenced from @@ -3893,7 +3893,7 @@ static void prvCheckTasksWaitingTermination( void ) * meaning of each TaskStatus_t structure member. */ do { - listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState ); uxTask++; } while( pxNextTCB != pxFirstTCB ); diff --git a/timers.c b/timers.c index 76a59a8b999..2c08db0af9d 100644 --- a/timers.c +++ b/timers.c @@ -565,7 +565,7 @@ static void prvProcessExpiredTimer( const TickType_t xNextExpireTime, const TickType_t xTimeNow ) { - Timer_t * const pxTimer = ( Timer_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxCurrentTimerList ); /*lint !e9087 !e9079 void * is used as this macro is used with tasks too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. 
*/ + Timer_t * const pxTimer = ( Timer_t * ) listGET_OWNER_OF_HEAD_ENTRY( pxCurrentTimerList ); /*lint !e9087 !e9079 void * is used as this macro is used with tasks and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ /* Remove the timer from the list of active timers. A check has already * been performed to ensure the list is not empty. */ From 4a35c97fec56891dd747a07641ae2eab7ed78803 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Thu, 8 Jun 2023 10:52:08 +0800 Subject: [PATCH 45/62] Remove __NVIC_PRIO_BITS and configPRIO_BITS check in port (#683) * Remove __NVIC_PRIO_BITS and configPRIO_BITS check in CM3, CM4 and ARMv8. * Add hardware not implemented bits check. These bits should be zero. --------- Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --- portable/ARMv8M/non_secure/port.c | 33 +++----------------- portable/CCS/ARM_CM3/port.c | 26 +++------------ portable/CCS/ARM_CM4F/port.c | 26 +++------------ portable/GCC/ARM_CM23/non_secure/port.c | 33 +++----------------- portable/GCC/ARM_CM23_NTZ/non_secure/port.c | 33 +++----------------- portable/GCC/ARM_CM3/port.c | 26 +++------------ portable/GCC/ARM_CM33/non_secure/port.c | 33 +++----------------- portable/GCC/ARM_CM33_NTZ/non_secure/port.c | 33 +++----------------- portable/GCC/ARM_CM35P/non_secure/port.c | 33 +++----------------- portable/GCC/ARM_CM35P_NTZ/non_secure/port.c | 33 +++----------------- portable/GCC/ARM_CM3_MPU/port.c | 26 +++------------ portable/GCC/ARM_CM4F/port.c | 26 +++------------ portable/GCC/ARM_CM4_MPU/port.c | 26 +++------------ portable/GCC/ARM_CM55/non_secure/port.c | 33 +++----------------- portable/GCC/ARM_CM55_NTZ/non_secure/port.c | 33 +++----------------- portable/GCC/ARM_CM7/r0p1/port.c | 26 +++------------ portable/GCC/ARM_CM85/non_secure/port.c | 33 +++----------------- portable/GCC/ARM_CM85_NTZ/non_secure/port.c | 33 +++----------------- portable/IAR/ARM_CM23/non_secure/port.c | 33 +++----------------- portable/IAR/ARM_CM23_NTZ/non_secure/port.c | 33 +++----------------- portable/IAR/ARM_CM3/port.c | 26 +++------------ portable/IAR/ARM_CM33/non_secure/port.c | 33 +++----------------- portable/IAR/ARM_CM33_NTZ/non_secure/port.c | 33 +++----------------- portable/IAR/ARM_CM35P/non_secure/port.c | 33 +++----------------- portable/IAR/ARM_CM35P_NTZ/non_secure/port.c | 33 +++----------------- portable/IAR/ARM_CM4F/port.c | 26 +++------------ portable/IAR/ARM_CM4F_MPU/port.c | 26 +++------------ portable/IAR/ARM_CM55/non_secure/port.c | 33 +++----------------- portable/IAR/ARM_CM55_NTZ/non_secure/port.c | 33 +++----------------- portable/IAR/ARM_CM7/r0p1/port.c | 26 +++------------ portable/IAR/ARM_CM85/non_secure/port.c | 33 +++----------------- portable/IAR/ARM_CM85_NTZ/non_secure/port.c | 33 +++----------------- portable/MikroC/ARM_CM4F/port.c | 26 +++------------ portable/RVDS/ARM_CM3/port.c | 26 +++------------ portable/RVDS/ARM_CM4F/port.c | 26 +++------------ portable/RVDS/ARM_CM4_MPU/port.c | 26 +++------------ portable/RVDS/ARM_CM7/r0p1/port.c | 26 +++------------ 37 files changed, 148 insertions(+), 961 deletions(-) diff --git a/portable/ARMv8M/non_secure/port.c b/portable/ARMv8M/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/ARMv8M/non_secure/port.c +++ b/portable/ARMv8M/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See 
https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/CCS/ARM_CM3/port.c b/portable/CCS/ARM_CM3/port.c index f3c4e5add03..a04efc9115a 100755 --- a/portable/CCS/ARM_CM3/port.c +++ b/portable/CCS/ARM_CM3/port.c @@ -249,6 +249,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -285,28 +289,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/CCS/ARM_CM4F/port.c b/portable/CCS/ARM_CM4F/port.c index c675afe67b4..1aff18b584a 100755 --- a/portable/CCS/ARM_CM4F/port.c +++ b/portable/CCS/ARM_CM4F/port.c @@ -268,6 +268,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -304,28 +308,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM23/non_secure/port.c b/portable/GCC/ARM_CM23/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/GCC/ARM_CM23/non_secure/port.c +++ b/portable/GCC/ARM_CM23/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM3/port.c b/portable/GCC/ARM_CM3/port.c index 9b42eac5a4e..198793bd52b 100755 --- a/portable/GCC/ARM_CM3/port.c +++ b/portable/GCC/ARM_CM3/port.c @@ -292,6 +292,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -328,28 +332,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM33/non_secure/port.c b/portable/GCC/ARM_CM33/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/GCC/ARM_CM33/non_secure/port.c +++ b/portable/GCC/ARM_CM33/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. 
*/ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM35P/non_secure/port.c b/portable/GCC/ARM_CM35P/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/GCC/ARM_CM35P/non_secure/port.c +++ b/portable/GCC/ARM_CM35P/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
*/ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c index 619f2b0c8be..e9e4b57723f 100755 --- a/portable/GCC/ARM_CM3_MPU/port.c +++ b/portable/GCC/ARM_CM3_MPU/port.c @@ -415,6 +415,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -451,28 +455,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM4F/port.c b/portable/GCC/ARM_CM4F/port.c index 88fc76db894..c59995f1a27 100755 --- a/portable/GCC/ARM_CM4F/port.c +++ b/portable/GCC/ARM_CM4F/port.c @@ -335,6 +335,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
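+ * For example, a core that implements four priority bits reads back
+ * ucMaxPriorityValue as 0xF0, so a configMAX_SYSCALL_INTERRUPT_PRIORITY of
+ * 0xA0 passes this check while 0xA5 fails because it sets bits that the
+ * hardware does not implement.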
*/ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -371,28 +375,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c index ab76ee84204..17b3aa93486 100755 --- a/portable/GCC/ARM_CM4_MPU/port.c +++ b/portable/GCC/ARM_CM4_MPU/port.c @@ -458,6 +458,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -494,28 +498,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried - * from hardware is at least as many as specified in the - * CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried - * from hardware is at least as many as specified in the - * FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM55/non_secure/port.c b/portable/GCC/ARM_CM55/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/GCC/ARM_CM55/non_secure/port.c +++ b/portable/GCC/ARM_CM55/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. 
- * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM7/r0p1/port.c b/portable/GCC/ARM_CM7/r0p1/port.c index 2be4f27704d..0fcbf5048ff 100755 --- a/portable/GCC/ARM_CM7/r0p1/port.c +++ b/portable/GCC/ARM_CM7/r0p1/port.c @@ -323,6 +323,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
*/ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -359,28 +363,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM85/non_secure/port.c b/portable/GCC/ARM_CM85/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/GCC/ARM_CM85/non_secure/port.c +++ b/portable/GCC/ARM_CM85/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM23/non_secure/port.c b/portable/IAR/ARM_CM23/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/IAR/ARM_CM23/non_secure/port.c +++ b/portable/IAR/ARM_CM23/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. 
*/ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM3/port.c b/portable/IAR/ARM_CM3/port.c index d54c3aceb4a..21b69bdd04a 100755 --- a/portable/IAR/ARM_CM3/port.c +++ b/portable/IAR/ARM_CM3/port.c @@ -241,6 +241,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. 
*/ @@ -277,28 +281,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM33/non_secure/port.c b/portable/IAR/ARM_CM33/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/IAR/ARM_CM33/non_secure/port.c +++ b/portable/IAR/ARM_CM33/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
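 *
 * As a minimal illustration, assuming a core that implements only the top
 * three priority bits, the value read back into ucMaxPriorityValue is 0xE0,
 * so a configMAX_SYSCALL_INTERRUPT_PRIORITY of 0xA0 keeps the unimplemented
 * low bits at zero and satisfies the check, while 0xA1 would not:
 *
 * @code{c}
 * uint8_t ucMaxPriorityValue = 0xE0;   // Assumed read-back for a core with three priority bits.
 *
 * // 0xA0 & ~0xE0 == 0x00, so this assert holds.
 * configASSERT( ( 0xA0 & ( ~ucMaxPriorityValue ) ) == 0U );
 *
 * // 0xA1 & ~0xE0 == 0x01, so the same check would fail for 0xA1.
 * @endcode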
*/ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM35P/non_secure/port.c b/portable/IAR/ARM_CM35P/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/IAR/ARM_CM35P/non_secure/port.c +++ b/portable/IAR/ARM_CM35P/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM4F/port.c b/portable/IAR/ARM_CM4F/port.c index e0deaf12840..0352fc3bdb4 100755 --- a/portable/IAR/ARM_CM4F/port.c +++ b/portable/IAR/ARM_CM4F/port.c @@ -279,6 +279,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -315,28 +319,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c index 1b7cca65c86..742518be5bf 100755 --- a/portable/IAR/ARM_CM4F_MPU/port.c +++ b/portable/IAR/ARM_CM4F_MPU/port.c @@ -393,6 +393,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -429,28 +433,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM55/non_secure/port.c b/portable/IAR/ARM_CM55/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/IAR/ARM_CM55/non_secure/port.c +++ b/portable/IAR/ARM_CM55/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM7/r0p1/port.c b/portable/IAR/ARM_CM7/r0p1/port.c index 63f83993db9..3baf38aaaed 100755 --- a/portable/IAR/ARM_CM7/r0p1/port.c +++ b/portable/IAR/ARM_CM7/r0p1/port.c @@ -267,6 +267,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -303,28 +307,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM85/non_secure/port.c b/portable/IAR/ARM_CM85/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/IAR/ARM_CM85/non_secure/port.c +++ b/portable/IAR/ARM_CM85/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. */ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c index 7bbe1b7bc53..dbbb99507ef 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c @@ -1128,6 +1128,10 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -1164,35 +1168,6 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - /* The interrupt priority bits are not modelled in QEMU and the assert that - * checks the number of implemented bits and __NVIC_PRIO_BITS will always fail. - * Therefore, this assert is not adding any value for QEMU targets. The config - * option `configDISABLE_INTERRUPT_PRIO_BITS_CHECK` should be defined in the - * `FreeRTOSConfig.h` for QEMU targets. 
*/ - #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK - { - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the CMSIS __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == __NVIC_PRIO_BITS ); - } - #endif /* __NVIC_PRIO_BITS */ - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is equal to the FreeRTOS configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits == configPRIO_BITS ); - } - #endif /* configPRIO_BITS */ - } - #endif /* #ifndef configDISABLE_INTERRUPT_PRIO_BITS_CHECK */ - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/MikroC/ARM_CM4F/port.c b/portable/MikroC/ARM_CM4F/port.c index 8ef593f5523..2fef250a1c8 100755 --- a/portable/MikroC/ARM_CM4F/port.c +++ b/portable/MikroC/ARM_CM4F/port.c @@ -329,6 +329,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -365,28 +369,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/RVDS/ARM_CM3/port.c b/portable/RVDS/ARM_CM3/port.c index ae7ce37f37b..054ab02eedc 100755 --- a/portable/RVDS/ARM_CM3/port.c +++ b/portable/RVDS/ARM_CM3/port.c @@ -294,6 +294,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -330,28 +334,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. 
- */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/RVDS/ARM_CM4F/port.c b/portable/RVDS/ARM_CM4F/port.c index cb003aa38f9..de48443bbe8 100755 --- a/portable/RVDS/ARM_CM4F/port.c +++ b/portable/RVDS/ARM_CM4F/port.c @@ -360,6 +360,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -396,28 +400,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c index 13e2f8a8ed1..fdf1ff95b02 100755 --- a/portable/RVDS/ARM_CM4_MPU/port.c +++ b/portable/RVDS/ARM_CM4_MPU/port.c @@ -453,6 +453,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -489,28 +493,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. 
*/ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; diff --git a/portable/RVDS/ARM_CM7/r0p1/port.c b/portable/RVDS/ARM_CM7/r0p1/port.c index 1df54ab2802..ad9971c8d82 100755 --- a/portable/RVDS/ARM_CM7/r0p1/port.c +++ b/portable/RVDS/ARM_CM7/r0p1/port.c @@ -344,6 +344,10 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); + /* Check that the bits not implemented bits in hardware are zero in + * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); + /* Calculate the maximum acceptable priority group value for the number * of bits read back. */ @@ -380,28 +384,6 @@ BaseType_t xPortStartScheduler( void ) ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits; } - #ifdef __NVIC_PRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the CMSIS - * __NVIC_PRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= __NVIC_PRIO_BITS ); - } - #endif - - #ifdef configPRIO_BITS - { - /* - * Check that the number of implemented priority bits queried from - * hardware is at least as many as specified in the FreeRTOS - * configPRIO_BITS configuration macro. - */ - configASSERT( ulImplementedPrioBits >= configPRIO_BITS ); - } - #endif - /* Shift the priority group value back to its position within the AIRCR * register. */ ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT; From 80f67449ba6888241f8f9e0b00984081e6df6f0c Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Mon, 12 Jun 2023 17:33:52 +0800 Subject: [PATCH 46/62] Use UBaseType_t as interrupt mask (#689) * Use UBaseType_t as interrupt mask * Update GCC posix port to use UBaseType_t as interrupt mask --- event_groups.c | 6 ++-- portable/ThirdParty/GCC/Posix/port.c | 8 ++--- portable/ThirdParty/GCC/Posix/portmacro.h | 4 +-- queue.c | 24 +++++++-------- stream_buffer.c | 24 +++++++-------- tasks.c | 36 +++++++++++------------ 6 files changed, 51 insertions(+), 51 deletions(-) diff --git a/event_groups.c b/event_groups.c index 5c4b4297baf..d7238d89a30 100644 --- a/event_groups.c +++ b/event_groups.c @@ -521,15 +521,15 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) { - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; EventGroup_t const * const pxEventBits = xEventGroup; EventBits_t uxReturn; - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { uxReturn = pxEventBits->uxEventBits; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return uxReturn; } /*lint !e818 EventGroupHandle_t is a typedef used in other functions to so can't be pointer to const. */ diff --git a/portable/ThirdParty/GCC/Posix/port.c b/portable/ThirdParty/GCC/Posix/port.c index 5ac570d9a59..23aec65526e 100644 --- a/portable/ThirdParty/GCC/Posix/port.c +++ b/portable/ThirdParty/GCC/Posix/port.c @@ -332,17 +332,17 @@ void vPortEnableInterrupts( void ) } /*-----------------------------------------------------------*/ -portBASE_TYPE xPortSetInterruptMask( void ) +UBaseType_t xPortSetInterruptMask( void ) { /* Interrupts are always disabled inside ISRs (signals * handlers). 
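 *
 * The value returned here is therefore only a placeholder: callers save
 * whatever portSET_INTERRUPT_MASK_FROM_ISR() returns and hand it straight
 * back, and after this patch that saved status is typed as UBaseType_t
 * throughout the kernel. A sketch of the usual caller-side pattern
 * (generic usage assumed):
 *
 * @code{c}
 * UBaseType_t uxSavedInterruptStatus;
 *
 * uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
 * {
 *     // Access the data shared with tasks here.
 * }
 * portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
 * @endcode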
*/ - return pdTRUE; + return ( UBaseType_t )0; } /*-----------------------------------------------------------*/ -void vPortClearInterruptMask( portBASE_TYPE xMask ) +void vPortClearInterruptMask( UBaseType_t uxMask ) { - ( void ) xMask; + ( void ) uxMask; } /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/GCC/Posix/portmacro.h b/portable/ThirdParty/GCC/Posix/portmacro.h index a3ab7d32393..df2db076e69 100644 --- a/portable/ThirdParty/GCC/Posix/portmacro.h +++ b/portable/ThirdParty/GCC/Posix/portmacro.h @@ -94,8 +94,8 @@ extern void vPortEnableInterrupts( void ); #define portSET_INTERRUPT_MASK() ( vPortDisableInterrupts() ) #define portCLEAR_INTERRUPT_MASK() ( vPortEnableInterrupts() ) -extern portBASE_TYPE xPortSetInterruptMask( void ); -extern void vPortClearInterruptMask( portBASE_TYPE xMask ); +extern UBaseType_t xPortSetInterruptMask( void ); +extern void vPortClearInterruptMask( UBaseType_t xMask ); extern void vPortEnterCritical( void ); extern void vPortExitCritical( void ); diff --git a/queue.c b/queue.c index 27776f0e45c..23e97044dce 100644 --- a/queue.c +++ b/queue.c @@ -1099,7 +1099,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const BaseType_t xCopyPosition ) { BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; Queue_t * const pxQueue = xQueue; configASSERT( pxQueue ); @@ -1127,7 +1127,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, * read, instead return a flag to say whether a context switch is required or * not (i.e. has a task with a higher priority than us been woken by this * post). */ - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) ) { @@ -1252,7 +1252,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -1262,7 +1262,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) { BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; Queue_t * const pxQueue = xQueue; /* Similar to xQueueGenericSendFromISR() but used with semaphores where the @@ -1298,7 +1298,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -1418,7 +1418,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, xReturn = errQUEUE_FULL; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -1933,7 +1933,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken ) { BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; Queue_t * const pxQueue = xQueue; configASSERT( pxQueue ); @@ -1955,7 +1955,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - 
xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting; @@ -2015,7 +2015,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -2025,7 +2025,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer ) { BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; int8_t * pcOriginalReadPosition; Queue_t * const pxQueue = xQueue; @@ -2049,7 +2049,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { /* Cannot block in an ISR, so check there is data available. */ if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 ) @@ -2070,7 +2070,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue ); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } diff --git a/stream_buffer.c b/stream_buffer.c index 7dbebbd624c..77543065591 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -96,9 +96,9 @@ #define sbRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ pxHigherPriorityTaskWoken ) \ do { \ - portBASE_TYPE xSavedInterruptStatus; \ + UBaseType_t uxSavedInterruptStatus; \ \ - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \ + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \ { \ @@ -109,7 +109,7 @@ ( pxStreamBuffer )->xTaskWaitingToSend = NULL; \ } \ } \ - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); \ + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ } while( 0 ) #endif /* sbRECEIVE_COMPLETED_FROM_ISR */ @@ -173,9 +173,9 @@ #ifndef sbSEND_COMPLETE_FROM_ISR #define sbSEND_COMPLETE_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ do { \ - portBASE_TYPE xSavedInterruptStatus; \ + UBaseType_t uxSavedInterruptStatus; \ \ - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \ + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); \ { \ if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \ { \ @@ -186,7 +186,7 @@ ( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \ } \ } \ - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); \ + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \ } while( 0 ) #endif /* sbSEND_COMPLETE_FROM_ISR */ @@ -1214,11 +1214,11 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; configASSERT( pxStreamBuffer ); - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) { @@ -1234,7 +1234,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer xReturn = pdFALSE; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + 
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -1245,11 +1245,11 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf { StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; BaseType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; configASSERT( pxStreamBuffer ); - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) { @@ -1265,7 +1265,7 @@ BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuf xReturn = pdFALSE; } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } diff --git a/tasks.c b/tasks.c index f2d6a40c307..5548220abeb 100644 --- a/tasks.c +++ b/tasks.c @@ -1481,7 +1481,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { TCB_t const * pxTCB; UBaseType_t uxReturn; - portBASE_TYPE xSavedInterruptState; + UBaseType_t uxSavedInterruptState; /* RTOS ports that support interrupt nesting have the concept of a * maximum system call (or maximum API call) interrupt priority. @@ -1501,14 +1501,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - xSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR(); { /* If null is passed in here then it is the priority of the calling * task that is being queried. */ pxTCB = prvGetTCBFromHandle( xTask ); uxReturn = pxTCB->uxPriority; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptState ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState ); return uxReturn; } @@ -1894,7 +1894,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) { BaseType_t xYieldRequired = pdFALSE; TCB_t * const pxTCB = xTaskToResume; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; configASSERT( xTaskToResume ); @@ -1916,7 +1916,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) { @@ -1957,7 +1957,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) mtCOVERAGE_TEST_MARKER(); } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xYieldRequired; } @@ -2315,7 +2315,7 @@ TickType_t xTaskGetTickCount( void ) TickType_t xTaskGetTickCountFromISR( void ) { TickType_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; /* RTOS ports that support interrupt nesting have the concept of a maximum * system call (or maximum API call) interrupt priority. 
Interrupts that are @@ -2333,11 +2333,11 @@ TickType_t xTaskGetTickCountFromISR( void ) * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); - xSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); { xReturn = xTickCount; } - portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -3015,18 +3015,18 @@ BaseType_t xTaskIncrementTick( void ) { TCB_t * pxTCB; TaskHookFunction_t xReturn; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; /* If xTask is NULL then set the calling task's hook. */ pxTCB = prvGetTCBFromHandle( xTask ); /* Save the hook function in the TCB. A critical section is required as * the value can be accessed from an interrupt. */ - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { xReturn = pxTCB->pxTaskTag; } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -5035,7 +5035,7 @@ TickType_t uxTaskResetEventItemValue( void ) TCB_t * pxTCB; uint8_t ucOriginalNotifyState; BaseType_t xReturn = pdPASS; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; configASSERT( xTaskToNotify ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); @@ -5060,7 +5060,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { if( pulPreviousNotificationValue != NULL ) { @@ -5154,7 +5154,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); return xReturn; } @@ -5170,7 +5170,7 @@ TickType_t uxTaskResetEventItemValue( void ) { TCB_t * pxTCB; uint8_t ucOriginalNotifyState; - portBASE_TYPE xSavedInterruptStatus; + UBaseType_t uxSavedInterruptStatus; configASSERT( xTaskToNotify ); configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); @@ -5195,7 +5195,7 @@ TickType_t uxTaskResetEventItemValue( void ) pxTCB = xTaskToNotify; - xSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); { ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ]; pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED; @@ -5245,7 +5245,7 @@ TickType_t uxTaskResetEventItemValue( void ) } } } - portCLEAR_INTERRUPT_MASK_FROM_ISR( xSavedInterruptStatus ); + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); } #endif /* configUSE_TASK_NOTIFICATIONS */ From 77d8086f1c36f97e4254adb715c40ae6dbcc6164 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Mon, 12 Jun 2023 17:44:23 +0800 Subject: [PATCH 47/62] Fix clang warning in croutine and stream buffer (#686) * Fix document warning in croutine * Fix cast-qual warning in stream buffer --- include/croutine.h | 10 +++++----- stream_buffer.c | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/croutine.h b/include/croutine.h index 8ac4aa521bc..96e39723080 100644 --- a/include/croutine.h +++ b/include/croutine.h @@ -53,11 +53,11 @@ typedef void (* crCOROUTINE_CODE)( 
CoRoutineHandle_t, typedef struct corCoRoutineControlBlock { crCOROUTINE_CODE pxCoRoutineFunction; - ListItem_t xGenericListItem; /*< List item used to place the CRCB in ready and blocked queues. */ - ListItem_t xEventListItem; /*< List item used to place the CRCB in event lists. */ - UBaseType_t uxPriority; /*< The priority of the co-routine in relation to other co-routines. */ - UBaseType_t uxIndex; /*< Used to distinguish between co-routines when multiple co-routines use the same co-routine function. */ - uint16_t uxState; /*< Used internally by the co-routine implementation. */ + ListItem_t xGenericListItem; /**< List item used to place the CRCB in ready and blocked queues. */ + ListItem_t xEventListItem; /**< List item used to place the CRCB in event lists. */ + UBaseType_t uxPriority; /**< The priority of the co-routine in relation to other co-routines. */ + UBaseType_t uxIndex; /**< Used to distinguish between co-routines when multiple co-routines use the same co-routine function. */ + uint16_t uxState; /**< Used internally by the co-routine implementation. */ } CRCB_t; /* Co-routine control block. Note must be identical in size down to uxPriority with TCB_t. */ /** diff --git a/stream_buffer.c b/stream_buffer.c index 77543065591..cbc0939b3cd 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -478,7 +478,7 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, StaticStreamBuffer_t ** ppxStaticStreamBuffer ) { BaseType_t xReturn; - const StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; + StreamBuffer_t * const pxStreamBuffer = xStreamBuffer; configASSERT( pxStreamBuffer ); configASSERT( ppucStreamBufferStorageArea ); From 17a46c252f2f237e03a6768c5d15731215322f31 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Mon, 12 Jun 2023 18:47:17 +0800 Subject: [PATCH 48/62] Use portTASK_FUNCTION_PROTO to replace portNORETURN (#688) * Use portTASK_FUNCTION_PROTO to replace portNORETURN --- include/FreeRTOS.h | 4 ---- portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h | 1 - .../ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h | 1 - portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h | 1 - .../ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h | 1 - portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h | 1 - portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h | 1 - portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h | 1 - portable/GCC/ARM_CM0/portmacro.h | 1 - portable/GCC/ARM_CM23/non_secure/portmacro.h | 1 - portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h | 1 - portable/GCC/ARM_CM3/portmacro.h | 1 - portable/GCC/ARM_CM33/non_secure/portmacro.h | 1 - portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h | 1 - portable/GCC/ARM_CM35P/non_secure/portmacro.h | 1 - portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h | 1 - portable/GCC/ARM_CM3_MPU/portmacro.h | 1 - portable/GCC/ARM_CM4F/portmacro.h | 1 - portable/GCC/ARM_CM4_MPU/portmacro.h | 1 - portable/GCC/ARM_CM55/non_secure/portmacro.h | 1 - portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h | 1 - portable/GCC/ARM_CM7/r0p1/portmacro.h | 1 - portable/GCC/ARM_CM85/non_secure/portmacro.h | 1 - portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h | 1 - portable/ThirdParty/GCC/Posix/portmacro.h | 4 +--- portable/ThirdParty/GCC/RP2040/include/portmacro.h | 1 - portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h | 2 -- tasks.c | 4 ++-- timers.c | 2 +- 29 files changed, 4 insertions(+), 36 deletions(-) diff --git 
a/include/FreeRTOS.h b/include/FreeRTOS.h index 44d5efdf269..b7d50541cbf 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -895,10 +895,6 @@ #define portDONT_DISCARD #endif -#ifndef portNORETURN - #define portNORETURN -#endif - #ifndef configUSE_TIME_SLICING #define configUSE_TIME_SLICING 1 #endif diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h index 746f734b8ac..5fd94c1c371 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M23" #define portHAS_BASEPRI 0 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h index 746f734b8ac..5fd94c1c371 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M23" #define portHAS_BASEPRI 0 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h index 19da9b0ecfe..b9efb07ddc7 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M33" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h index 19da9b0ecfe..b9efb07ddc7 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M33" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h index cc643459770..9545737c550 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM35P/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M35P" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. 
*/ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h index c9bad40cf98..12bb5e7c4b9 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM55/portmacro.h @@ -56,7 +56,6 @@ #define portARCH_NAME "Cortex-M55" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h index c45dd21c29e..99f913d3491 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM85/portmacro.h @@ -56,7 +56,6 @@ #define portARCH_NAME "Cortex-M85" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM0/portmacro.h b/portable/GCC/ARM_CM0/portmacro.h index dc7f54578e2..14375bf9581 100644 --- a/portable/GCC/ARM_CM0/portmacro.h +++ b/portable/GCC/ARM_CM0/portmacro.h @@ -79,7 +79,6 @@ typedef unsigned long UBaseType_t; #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacro.h b/portable/GCC/ARM_CM23/non_secure/portmacro.h index 746f734b8ac..5fd94c1c371 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M23" #define portHAS_BASEPRI 0 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h index 746f734b8ac..5fd94c1c371 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M23" #define portHAS_BASEPRI 0 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM3/portmacro.h b/portable/GCC/ARM_CM3/portmacro.h index 36e38f75d21..0ff96b82ae8 100644 --- a/portable/GCC/ARM_CM3/portmacro.h +++ b/portable/GCC/ARM_CM3/portmacro.h @@ -79,7 +79,6 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) - #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. 
*/ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacro.h b/portable/GCC/ARM_CM33/non_secure/portmacro.h index 19da9b0ecfe..b9efb07ddc7 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M33" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h index 19da9b0ecfe..b9efb07ddc7 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M33" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacro.h b/portable/GCC/ARM_CM35P/non_secure/portmacro.h index cc643459770..9545737c550 100644 --- a/portable/GCC/ARM_CM35P/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM35P/non_secure/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M35P" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h index cc643459770..9545737c550 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacro.h @@ -51,7 +51,6 @@ #define portARCH_NAME "Cortex-M35P" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM3_MPU/portmacro.h b/portable/GCC/ARM_CM3_MPU/portmacro.h index b15193f1ee5..e73447bf7e1 100644 --- a/portable/GCC/ARM_CM3_MPU/portmacro.h +++ b/portable/GCC/ARM_CM3_MPU/portmacro.h @@ -115,7 +115,6 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) - #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* SVC numbers for various services. */ diff --git a/portable/GCC/ARM_CM4F/portmacro.h b/portable/GCC/ARM_CM4F/portmacro.h index 443661a6d3a..532eefa7405 100644 --- a/portable/GCC/ARM_CM4F/portmacro.h +++ b/portable/GCC/ARM_CM4F/portmacro.h @@ -82,7 +82,6 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) - #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. 
*/ diff --git a/portable/GCC/ARM_CM4_MPU/portmacro.h b/portable/GCC/ARM_CM4_MPU/portmacro.h index df23d95381f..f3365d14371 100644 --- a/portable/GCC/ARM_CM4_MPU/portmacro.h +++ b/portable/GCC/ARM_CM4_MPU/portmacro.h @@ -203,7 +203,6 @@ typedef struct MPU_SETTINGS #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* SVC numbers for various services. */ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacro.h b/portable/GCC/ARM_CM55/non_secure/portmacro.h index c9bad40cf98..12bb5e7c4b9 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacro.h @@ -56,7 +56,6 @@ #define portARCH_NAME "Cortex-M55" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h index c9bad40cf98..12bb5e7c4b9 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacro.h @@ -56,7 +56,6 @@ #define portARCH_NAME "Cortex-M55" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM7/r0p1/portmacro.h b/portable/GCC/ARM_CM7/r0p1/portmacro.h index 82529f998ad..f433beb52d0 100644 --- a/portable/GCC/ARM_CM7/r0p1/portmacro.h +++ b/portable/GCC/ARM_CM7/r0p1/portmacro.h @@ -79,7 +79,6 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) - #define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* Scheduler utilities. */ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacro.h b/portable/GCC/ARM_CM85/non_secure/portmacro.h index c45dd21c29e..99f913d3491 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacro.h @@ -56,7 +56,6 @@ #define portARCH_NAME "Cortex-M85" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. */ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h index c45dd21c29e..99f913d3491 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacro.h @@ -56,7 +56,6 @@ #define portARCH_NAME "Cortex-M85" #define portHAS_BASEPRI 1 #define portDONT_DISCARD __attribute__( ( used ) ) -#define portNORETURN __attribute__( ( noreturn ) ) /*-----------------------------------------------------------*/ /* ARMv8-M common port configurations. 
*/ diff --git a/portable/ThirdParty/GCC/Posix/portmacro.h b/portable/ThirdParty/GCC/Posix/portmacro.h index df2db076e69..154a039758c 100644 --- a/portable/ThirdParty/GCC/Posix/portmacro.h +++ b/portable/ThirdParty/GCC/Posix/portmacro.h @@ -70,8 +70,6 @@ typedef unsigned long TickType_t; /*-----------------------------------------------------------*/ /* Architecture specifics. */ -#define portNORETURN __attribute__( ( noreturn ) ) - #define portSTACK_GROWTH ( -1 ) #define portHAS_STACK_OVERFLOW_CHECKING ( 1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) @@ -114,7 +112,7 @@ extern void vPortCancelThread( void *pxTaskToDelete ); #define portCLEAN_UP_TCB( pxTCB ) vPortCancelThread( pxTCB ) /*-----------------------------------------------------------*/ -#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) +#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters ) __attribute__( ( noreturn ) ) #define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters ) /*-----------------------------------------------------------*/ diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h index 15f71811ea8..973ac39f035 100644 --- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h +++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h @@ -80,7 +80,6 @@ #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 #define portDONT_DISCARD __attribute__( ( used ) ) - #define portNORETURN __attribute__( ( noreturn ) ) /* We have to use PICO_DIVIDER_DISABLE_INTERRUPTS as the source of truth rathern than our config, * as our FreeRTOSConfig.h header cannot be included by ASM code - which is what this affects in the SDK */ #define portUSE_DIVIDER_SAVE_RESTORE !PICO_DIVIDER_DISABLE_INTERRUPTS diff --git a/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h b/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h index 575659ac597..e87d560cfcd 100644 --- a/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h +++ b/portable/ThirdParty/GCC/Xtensa_ESP32/include/portmacro.h @@ -335,8 +335,6 @@ /*-----------------------------------------------------------*/ /* Architecture specifics. */ - #define portNORETURN __attribute__( ( noreturn ) ) - #define portSTACK_GROWTH ( -1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 4 diff --git a/tasks.c b/tasks.c index 5548220abeb..5bdfff58d62 100644 --- a/tasks.c +++ b/tasks.c @@ -432,7 +432,7 @@ static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; * void prvIdleTask( void *pvParameters ); * */ -static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) portNORETURN PRIVILEGED_FUNCTION; +static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION; /* * Utility to free all memory allocated by the scheduler to hold a TCB, @@ -3486,7 +3486,7 @@ void vTaskMissedYield( void ) * */ -portTASK_FUNCTION( prvIdleTask, pvParameters ) +static portTASK_FUNCTION( prvIdleTask, pvParameters ) { /* Stop warnings. */ ( void ) pvParameters; diff --git a/timers.c b/timers.c index 2c08db0af9d..457ab8f8fa3 100644 --- a/timers.c +++ b/timers.c @@ -158,7 +158,7 @@ * task. Other tasks communicate with the timer service task using the * xTimerQueue queue. 
*/ - static portTASK_FUNCTION_PROTO( prvTimerTask, pvParameters ) portNORETURN PRIVILEGED_FUNCTION; + static portTASK_FUNCTION_PROTO( prvTimerTask, pvParameters ) PRIVILEGED_FUNCTION; /* * Called by the timer service task to interpret and process a command it From 9a6284262e583a24633e04d5df5ae71183c29150 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Wed, 21 Jun 2023 17:35:18 +0800 Subject: [PATCH 49/62] Fix typo in check comment of configMAX_SYSCALL_INTERRUPT_PRIORITY (#690) --- portable/ARMv8M/non_secure/port.c | 2 +- portable/CCS/ARM_CM3/port.c | 2 +- portable/CCS/ARM_CM4F/port.c | 2 +- portable/GCC/ARM_CM23/non_secure/port.c | 2 +- portable/GCC/ARM_CM23_NTZ/non_secure/port.c | 2 +- portable/GCC/ARM_CM3/port.c | 2 +- portable/GCC/ARM_CM33/non_secure/port.c | 2 +- portable/GCC/ARM_CM33_NTZ/non_secure/port.c | 2 +- portable/GCC/ARM_CM35P/non_secure/port.c | 2 +- portable/GCC/ARM_CM35P_NTZ/non_secure/port.c | 2 +- portable/GCC/ARM_CM3_MPU/port.c | 2 +- portable/GCC/ARM_CM4F/port.c | 2 +- portable/GCC/ARM_CM4_MPU/port.c | 2 +- portable/GCC/ARM_CM55/non_secure/port.c | 2 +- portable/GCC/ARM_CM55_NTZ/non_secure/port.c | 2 +- portable/GCC/ARM_CM7/r0p1/port.c | 2 +- portable/GCC/ARM_CM85/non_secure/port.c | 2 +- portable/GCC/ARM_CM85_NTZ/non_secure/port.c | 2 +- portable/IAR/ARM_CM23/non_secure/port.c | 2 +- portable/IAR/ARM_CM23_NTZ/non_secure/port.c | 2 +- portable/IAR/ARM_CM3/port.c | 2 +- portable/IAR/ARM_CM33/non_secure/port.c | 2 +- portable/IAR/ARM_CM33_NTZ/non_secure/port.c | 2 +- portable/IAR/ARM_CM35P/non_secure/port.c | 2 +- portable/IAR/ARM_CM35P_NTZ/non_secure/port.c | 2 +- portable/IAR/ARM_CM4F/port.c | 2 +- portable/IAR/ARM_CM4F_MPU/port.c | 2 +- portable/IAR/ARM_CM55/non_secure/port.c | 2 +- portable/IAR/ARM_CM55_NTZ/non_secure/port.c | 2 +- portable/IAR/ARM_CM7/r0p1/port.c | 2 +- portable/IAR/ARM_CM85/non_secure/port.c | 2 +- portable/IAR/ARM_CM85_NTZ/non_secure/port.c | 2 +- portable/MikroC/ARM_CM4F/port.c | 2 +- portable/RVDS/ARM_CM3/port.c | 2 +- portable/RVDS/ARM_CM4F/port.c | 2 +- portable/RVDS/ARM_CM4_MPU/port.c | 2 +- portable/RVDS/ARM_CM7/r0p1/port.c | 2 +- 37 files changed, 37 insertions(+), 37 deletions(-) diff --git a/portable/ARMv8M/non_secure/port.c b/portable/ARMv8M/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/ARMv8M/non_secure/port.c +++ b/portable/ARMv8M/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/CCS/ARM_CM3/port.c b/portable/CCS/ARM_CM3/port.c index a04efc9115a..530e38ad3a2 100755 --- a/portable/CCS/ARM_CM3/port.c +++ b/portable/CCS/ARM_CM3/port.c @@ -249,7 +249,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
*/ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/CCS/ARM_CM4F/port.c b/portable/CCS/ARM_CM4F/port.c index 1aff18b584a..a1fc5210e39 100755 --- a/portable/CCS/ARM_CM4F/port.c +++ b/portable/CCS/ARM_CM4F/port.c @@ -268,7 +268,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM23/non_secure/port.c b/portable/GCC/ARM_CM23/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/GCC/ARM_CM23/non_secure/port.c +++ b/portable/GCC/ARM_CM23/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM3/port.c b/portable/GCC/ARM_CM3/port.c index 198793bd52b..ac8185f5f73 100755 --- a/portable/GCC/ARM_CM3/port.c +++ b/portable/GCC/ARM_CM3/port.c @@ -292,7 +292,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM33/non_secure/port.c b/portable/GCC/ARM_CM33/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/GCC/ARM_CM33/non_secure/port.c +++ b/portable/GCC/ARM_CM33/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
*/ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM35P/non_secure/port.c b/portable/GCC/ARM_CM35P/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/GCC/ARM_CM35P/non_secure/port.c +++ b/portable/GCC/ARM_CM35P/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c index e9e4b57723f..1a3f9473178 100755 --- a/portable/GCC/ARM_CM3_MPU/port.c +++ b/portable/GCC/ARM_CM3_MPU/port.c @@ -415,7 +415,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM4F/port.c b/portable/GCC/ARM_CM4F/port.c index c59995f1a27..d18854e1beb 100755 --- a/portable/GCC/ARM_CM4F/port.c +++ b/portable/GCC/ARM_CM4F/port.c @@ -335,7 +335,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
*/ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c index 17b3aa93486..8a7f4c1a4e4 100755 --- a/portable/GCC/ARM_CM4_MPU/port.c +++ b/portable/GCC/ARM_CM4_MPU/port.c @@ -458,7 +458,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM55/non_secure/port.c b/portable/GCC/ARM_CM55/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/GCC/ARM_CM55/non_secure/port.c +++ b/portable/GCC/ARM_CM55/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM7/r0p1/port.c b/portable/GCC/ARM_CM7/r0p1/port.c index 0fcbf5048ff..aab077d7639 100755 --- a/portable/GCC/ARM_CM7/r0p1/port.c +++ b/portable/GCC/ARM_CM7/r0p1/port.c @@ -323,7 +323,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM85/non_secure/port.c b/portable/GCC/ARM_CM85/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/GCC/ARM_CM85/non_secure/port.c +++ b/portable/GCC/ARM_CM85/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
*/ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/IAR/ARM_CM23/non_secure/port.c b/portable/IAR/ARM_CM23/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/IAR/ARM_CM23/non_secure/port.c +++ b/portable/IAR/ARM_CM23/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/IAR/ARM_CM3/port.c b/portable/IAR/ARM_CM3/port.c index 21b69bdd04a..1e3a3ded0df 100755 --- a/portable/IAR/ARM_CM3/port.c +++ b/portable/IAR/ARM_CM3/port.c @@ -241,7 +241,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/IAR/ARM_CM33/non_secure/port.c b/portable/IAR/ARM_CM33/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/IAR/ARM_CM33/non_secure/port.c +++ b/portable/IAR/ARM_CM33/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
*/ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/IAR/ARM_CM35P/non_secure/port.c b/portable/IAR/ARM_CM35P/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/IAR/ARM_CM35P/non_secure/port.c +++ b/portable/IAR/ARM_CM35P/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/IAR/ARM_CM4F/port.c b/portable/IAR/ARM_CM4F/port.c index 0352fc3bdb4..52f5ac28709 100755 --- a/portable/IAR/ARM_CM4F/port.c +++ b/portable/IAR/ARM_CM4F/port.c @@ -279,7 +279,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c index 742518be5bf..2d70b339a3a 100755 --- a/portable/IAR/ARM_CM4F_MPU/port.c +++ b/portable/IAR/ARM_CM4F_MPU/port.c @@ -393,7 +393,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
*/ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/IAR/ARM_CM55/non_secure/port.c b/portable/IAR/ARM_CM55/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/IAR/ARM_CM55/non_secure/port.c +++ b/portable/IAR/ARM_CM55/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/IAR/ARM_CM7/r0p1/port.c b/portable/IAR/ARM_CM7/r0p1/port.c index 3baf38aaaed..c1a1cc5aa5a 100755 --- a/portable/IAR/ARM_CM7/r0p1/port.c +++ b/portable/IAR/ARM_CM7/r0p1/port.c @@ -267,7 +267,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/IAR/ARM_CM85/non_secure/port.c b/portable/IAR/ARM_CM85/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/IAR/ARM_CM85/non_secure/port.c +++ b/portable/IAR/ARM_CM85/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c index dbbb99507ef..88c45048fa1 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c @@ -1128,7 +1128,7 @@ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
*/ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/MikroC/ARM_CM4F/port.c b/portable/MikroC/ARM_CM4F/port.c index 2fef250a1c8..91c924a05c7 100755 --- a/portable/MikroC/ARM_CM4F/port.c +++ b/portable/MikroC/ARM_CM4F/port.c @@ -329,7 +329,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/RVDS/ARM_CM3/port.c b/portable/RVDS/ARM_CM3/port.c index 054ab02eedc..91038fd69b5 100755 --- a/portable/RVDS/ARM_CM3/port.c +++ b/portable/RVDS/ARM_CM3/port.c @@ -294,7 +294,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/RVDS/ARM_CM4F/port.c b/portable/RVDS/ARM_CM4F/port.c index de48443bbe8..3c2040596b0 100755 --- a/portable/RVDS/ARM_CM4F/port.c +++ b/portable/RVDS/ARM_CM4F/port.c @@ -360,7 +360,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c index fdf1ff95b02..ac35afb7b08 100755 --- a/portable/RVDS/ARM_CM4_MPU/port.c +++ b/portable/RVDS/ARM_CM4_MPU/port.c @@ -453,7 +453,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. */ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); diff --git a/portable/RVDS/ARM_CM7/r0p1/port.c b/portable/RVDS/ARM_CM7/r0p1/port.c index ad9971c8d82..8414acc99e6 100755 --- a/portable/RVDS/ARM_CM7/r0p1/port.c +++ b/portable/RVDS/ARM_CM7/r0p1/port.c @@ -344,7 +344,7 @@ BaseType_t xPortStartScheduler( void ) * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ configASSERT( ucMaxSysCallPriority ); - /* Check that the bits not implemented bits in hardware are zero in + /* Check that the bits not implemented in hardware are zero in * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
*/ configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U ); From aa012e8d82bd05f8747c54b2e89694c9673bce4e Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Mon, 26 Jun 2023 10:49:59 +0800 Subject: [PATCH 50/62] Add constant type for portMAX_DELAY in port (#691) Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com> --- portable/GCC/AVR32_UC3/portmacro.h | 2 +- portable/GCC/CORTUS_APS3/portmacro.h | 2 +- portable/GCC/ColdFire_V2/portmacro.h | 2 +- portable/GCC/H8S2329/portmacro.h | 2 +- portable/GCC/HCS12/portmacro.h | 2 +- portable/GCC/MSP430F449/portmacro.h | 2 +- portable/GCC/PPC405_Xilinx/portmacro.h | 2 +- portable/GCC/PPC440_Xilinx/portmacro.h | 2 +- portable/GCC/RL78/portmacro.h | 2 +- portable/GCC/STR75x/portmacro.h | 2 +- portable/IAR/78K0R/portmacro.h | 2 +- portable/IAR/ATMega323/portmacro.h | 2 +- portable/IAR/AVR32_UC3/portmacro.h | 2 +- portable/IAR/AVR_AVRDx/portmacro.h | 2 +- portable/IAR/AVR_Mega0/portmacro.h | 2 +- portable/IAR/AtmelSAM7S64/portmacro.h | 2 +- portable/IAR/AtmelSAM9XE/portmacro.h | 2 +- portable/IAR/LPC2000/portmacro.h | 2 +- portable/IAR/MSP430/portmacro.h | 2 +- portable/IAR/MSP430X/portmacro.h | 2 +- portable/IAR/RL78/portmacro.h | 2 +- portable/WizC/PIC18/portmacro.h | 2 +- 22 files changed, 22 insertions(+), 22 deletions(-) diff --git a/portable/GCC/AVR32_UC3/portmacro.h b/portable/GCC/AVR32_UC3/portmacro.h index 5bc7c8b0877..ebf5010919d 100644 --- a/portable/GCC/AVR32_UC3/portmacro.h +++ b/portable/GCC/AVR32_UC3/portmacro.h @@ -116,7 +116,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/CORTUS_APS3/portmacro.h b/portable/GCC/CORTUS_APS3/portmacro.h index 37e739bb55f..9d2d0e0cdb5 100644 --- a/portable/GCC/CORTUS_APS3/portmacro.h +++ b/portable/GCC/CORTUS_APS3/portmacro.h @@ -65,7 +65,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/ColdFire_V2/portmacro.h b/portable/GCC/ColdFire_V2/portmacro.h index 14d318b7b94..06ddbd88764 100644 --- a/portable/GCC/ColdFire_V2/portmacro.h +++ b/portable/GCC/ColdFire_V2/portmacro.h @@ -63,7 +63,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif diff --git a/portable/GCC/H8S2329/portmacro.h b/portable/GCC/H8S2329/portmacro.h index 829ff9f9b64..a5612cadfb8 100644 --- a/portable/GCC/H8S2329/portmacro.h +++ b/portable/GCC/H8S2329/portmacro.h @@ -64,7 +64,7 @@ typedef unsigned char UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/HCS12/portmacro.h b/portable/GCC/HCS12/portmacro.h index a864419899f..a11b96c37d0 100644 --- a/portable/GCC/HCS12/portmacro.h +++ b/portable/GCC/HCS12/portmacro.h @@ -65,7 +65,7 @@ typedef unsigned char UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/MSP430F449/portmacro.h b/portable/GCC/MSP430F449/portmacro.h index 5a6e1db9c94..2f70a24d1b2 100644 --- a/portable/GCC/MSP430F449/portmacro.h +++ b/portable/GCC/MSP430F449/portmacro.h @@ -63,7 +63,7 @@ typedef unsigned short UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/PPC405_Xilinx/portmacro.h b/portable/GCC/PPC405_Xilinx/portmacro.h index e6f1c06c845..06cca5df9ed 100644 --- a/portable/GCC/PPC405_Xilinx/portmacro.h +++ b/portable/GCC/PPC405_Xilinx/portmacro.h @@ -65,7 +65,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/PPC440_Xilinx/portmacro.h b/portable/GCC/PPC440_Xilinx/portmacro.h index e6f1c06c845..06cca5df9ed 100644 --- a/portable/GCC/PPC440_Xilinx/portmacro.h +++ b/portable/GCC/PPC440_Xilinx/portmacro.h @@ -65,7 +65,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif diff --git a/portable/GCC/RL78/portmacro.h b/portable/GCC/RL78/portmacro.h index 8108f1c1221..34940200838 100644 --- a/portable/GCC/RL78/portmacro.h +++ b/portable/GCC/RL78/portmacro.h @@ -59,7 +59,7 @@ typedef unsigned short UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/GCC/STR75x/portmacro.h b/portable/GCC/STR75x/portmacro.h index 8d5e832c59e..876783dcb67 100644 --- a/portable/GCC/STR75x/portmacro.h +++ b/portable/GCC/STR75x/portmacro.h @@ -64,7 +64,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/78K0R/portmacro.h b/portable/IAR/78K0R/portmacro.h index 4ccc083c080..03bb5dee7ef 100644 --- a/portable/IAR/78K0R/portmacro.h +++ b/portable/IAR/78K0R/portmacro.h @@ -64,7 +64,7 @@ typedef unsigned short UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/ATMega323/portmacro.h b/portable/IAR/ATMega323/portmacro.h index 69ba2f1d9ac..cbc7b2d156b 100644 --- a/portable/IAR/ATMega323/portmacro.h +++ b/portable/IAR/ATMega323/portmacro.h @@ -71,7 +71,7 @@ typedef unsigned char UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/AVR32_UC3/portmacro.h b/portable/IAR/AVR32_UC3/portmacro.h index 73c206ca5f8..98aa5be0745 100644 --- a/portable/IAR/AVR32_UC3/portmacro.h +++ b/portable/IAR/AVR32_UC3/portmacro.h @@ -119,7 +119,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/AVR_AVRDx/portmacro.h b/portable/IAR/AVR_AVRDx/portmacro.h index 5b4b5be8b3c..cb3cb7ec201 100644 --- a/portable/IAR/AVR_AVRDx/portmacro.h +++ b/portable/IAR/AVR_AVRDx/portmacro.h @@ -65,7 +65,7 @@ typedef unsigned char UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif diff --git a/portable/IAR/AVR_Mega0/portmacro.h b/portable/IAR/AVR_Mega0/portmacro.h index 5b4b5be8b3c..cb3cb7ec201 100644 --- a/portable/IAR/AVR_Mega0/portmacro.h +++ b/portable/IAR/AVR_Mega0/portmacro.h @@ -65,7 +65,7 @@ typedef unsigned char UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/AtmelSAM7S64/portmacro.h b/portable/IAR/AtmelSAM7S64/portmacro.h index 01c6eed6aad..9bcf8a67003 100644 --- a/portable/IAR/AtmelSAM7S64/portmacro.h +++ b/portable/IAR/AtmelSAM7S64/portmacro.h @@ -64,7 +64,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/AtmelSAM9XE/portmacro.h b/portable/IAR/AtmelSAM9XE/portmacro.h index db6f10cca85..fdc7c2ba684 100644 --- a/portable/IAR/AtmelSAM9XE/portmacro.h +++ b/portable/IAR/AtmelSAM9XE/portmacro.h @@ -67,7 +67,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/LPC2000/portmacro.h b/portable/IAR/LPC2000/portmacro.h index 8fdf1fe75a3..2929ecba54a 100644 --- a/portable/IAR/LPC2000/portmacro.h +++ b/portable/IAR/LPC2000/portmacro.h @@ -67,7 +67,7 @@ typedef unsigned long UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/MSP430/portmacro.h b/portable/IAR/MSP430/portmacro.h index 298307f5519..6dcec2f0314 100644 --- a/portable/IAR/MSP430/portmacro.h +++ b/portable/IAR/MSP430/portmacro.h @@ -58,7 +58,7 @@ typedef unsigned short UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. 
#endif diff --git a/portable/IAR/MSP430X/portmacro.h b/portable/IAR/MSP430X/portmacro.h index 1ab4665b9bf..72456cf11c0 100644 --- a/portable/IAR/MSP430X/portmacro.h +++ b/portable/IAR/MSP430X/portmacro.h @@ -67,7 +67,7 @@ typedef unsigned short UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/IAR/RL78/portmacro.h b/portable/IAR/RL78/portmacro.h index 9685b569d5d..1cfe85fa650 100644 --- a/portable/IAR/RL78/portmacro.h +++ b/portable/IAR/RL78/portmacro.h @@ -82,7 +82,7 @@ typedef unsigned short UBaseType_t; #define portMAX_DELAY ( TickType_t ) 0xffff #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif diff --git a/portable/WizC/PIC18/portmacro.h b/portable/WizC/PIC18/portmacro.h index ad70560e4ad..0fdba688eee 100644 --- a/portable/WizC/PIC18/portmacro.h +++ b/portable/WizC/PIC18/portmacro.h @@ -63,7 +63,7 @@ typedef unsigned char UBaseType_t; #define portMAX_DELAY ( TickType_t ) ( 0xFFFF ) #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS ) typedef uint32_t TickType_t; - #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFF ) + #define portMAX_DELAY ( TickType_t ) ( 0xFFFFFFFFUL ) #else #error configTICK_TYPE_WIDTH_IN_BITS set to unsupported tick type width. #endif From 788f8cfd7693da0b26a75c6758519d3f514ef497 Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Mon, 26 Jun 2023 15:44:33 +0800 Subject: [PATCH 51/62] Update static stream buffer size check (#693) * Use volatile size instead of sizeof directly to prevent always true/false warning --- stream_buffer.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/stream_buffer.c b/stream_buffer.c index cbc0939b3cd..2306b5d9b65 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -436,11 +436,13 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer, configASSERT( xBufferSizeBytes > sbBYTES_TO_STORE_MESSAGE_LENGTH ); #if ( configASSERT_DEFINED == 1 ) - + { /* Sanity check that the size of the structure used to declare a * variable of type StaticStreamBuffer_t equals the size of the real * message buffer structure. */ - configASSERT( sizeof( StaticStreamBuffer_t ) == sizeof( StreamBuffer_t ) ); + volatile size_t xSize = sizeof( StaticStreamBuffer_t ); + configASSERT( xSize == sizeof( StreamBuffer_t ) ); + } /*lint !e529 xSize is referenced is configASSERT() is defined. 
*/ #endif /* configASSERT_DEFINED */ if( ( pucStreamBufferStorageArea != NULL ) && ( pxStaticStreamBuffer != NULL ) ) From 891dcdf80a21b85edf36b00d80a0b72a535d2b87 Mon Sep 17 00:00:00 2001 From: Evgeny Ermakov <22344340+unspecd@users.noreply.github.com> Date: Mon, 26 Jun 2023 18:13:10 +0000 Subject: [PATCH 52/62] Fix typos in comments for the AT91SAM7S port (#695) Co-authored-by: RichardBarry <3073890+RichardBarry@users.noreply.github.com> --- portable/GCC/ARM7_AT91SAM7S/port.c | 2 +- portable/IAR/AtmelSAM7S64/port.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/portable/GCC/ARM7_AT91SAM7S/port.c b/portable/GCC/ARM7_AT91SAM7S/port.c index 335f9852626..a9d78bb138b 100644 --- a/portable/GCC/ARM7_AT91SAM7S/port.c +++ b/portable/GCC/ARM7_AT91SAM7S/port.c @@ -204,7 +204,7 @@ AT91PS_PITC pxPIT = AT91C_BASE_PITC; /* Configure the PIT period. */ pxPIT->PITC_PIMR = portPIT_ENABLE | portPIT_INT_ENABLE | portPIT_COUNTER_VALUE; - /* Enable the interrupt. Global interrupts are disables at this point so + /* Enable the interrupt. Global interrupts are disabled at this point so this is safe. */ AT91C_BASE_AIC->AIC_IECR = 0x1 << AT91C_ID_SYS; } diff --git a/portable/IAR/AtmelSAM7S64/port.c b/portable/IAR/AtmelSAM7S64/port.c index 1e14d2e4aaa..32489702348 100644 --- a/portable/IAR/AtmelSAM7S64/port.c +++ b/portable/IAR/AtmelSAM7S64/port.c @@ -218,7 +218,7 @@ AT91PS_PITC pxPIT = AT91C_BASE_PITC; /* Configure the PIT period. */ pxPIT->PITC_PIMR = portPIT_ENABLE | portPIT_INT_ENABLE | portPIT_COUNTER_VALUE; - /* Enable the interrupt. Global interrupts are disables at this point so + /* Enable the interrupt. Global interrupts are disabled at this point so this is safe. */ AT91F_AIC_EnableIt( AT91C_BASE_AIC, AT91C_ID_SYS ); } From 1c5eca348f1c63e1e117a73d42343686cb07a1ef Mon Sep 17 00:00:00 2001 From: Joris Putcuyps Date: Thu, 29 Jun 2023 20:18:51 +0200 Subject: [PATCH 53/62] Fix #697: Missing portPOINTER_SIZE_TYPE definition for ATmega port (#698) --- portable/ThirdParty/GCC/ATmega/portmacro.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/portable/ThirdParty/GCC/ATmega/portmacro.h b/portable/ThirdParty/GCC/ATmega/portmacro.h index 8292f2d7ed9..e48ddb2195e 100644 --- a/portable/ThirdParty/GCC/ATmega/portmacro.h +++ b/portable/ThirdParty/GCC/ATmega/portmacro.h @@ -54,6 +54,8 @@ #define portLONG long #define portSHORT int +#define portPOINTER_SIZE_TYPE uint16_t + typedef uint8_t StackType_t; typedef int8_t BaseType_t; typedef uint8_t UBaseType_t; From d0a490e49149244dfe2b1f5baa099963c64afa1b Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Thu, 6 Jul 2023 10:44:24 +0800 Subject: [PATCH 54/62] Remove empty expression statement compiler warning (#692) * Add do while( 0 ) loop for empty expression statement compiler warning --- croutine.c | 4 ++-- include/croutine.h | 22 ++++++++++++---------- include/semphr.h | 4 ++-- include/stack_macros.h | 12 ++++++------ stream_buffer.c | 8 ++++---- tasks.c | 4 ++-- 6 files changed, 28 insertions(+), 26 deletions(-) diff --git a/croutine.c b/croutine.c index 02b99ded3f2..f38e96247a3 100644 --- a/croutine.c +++ b/croutine.c @@ -66,13 +66,13 @@ * used from within an ISR. 
*/ #define prvAddCoRoutineToReadyQueue( pxCRCB ) \ - { \ + do { \ if( ( pxCRCB )->uxPriority > uxTopCoRoutineReadyPriority ) \ { \ uxTopCoRoutineReadyPriority = ( pxCRCB )->uxPriority; \ } \ vListInsertEnd( ( List_t * ) &( pxReadyCoRoutineLists[ ( pxCRCB )->uxPriority ] ), &( ( pxCRCB )->xGenericListItem ) ); \ - } + } while( 0 ) /* * Utility to ready all the lists used by the scheduler. This is called diff --git a/include/croutine.h b/include/croutine.h index 96e39723080..664e38b3764 100644 --- a/include/croutine.h +++ b/include/croutine.h @@ -307,12 +307,14 @@ void vCoRoutineSchedule( void ); * \defgroup crDELAY crDELAY * \ingroup Tasks */ -#define crDELAY( xHandle, xTicksToDelay ) \ - if( ( xTicksToDelay ) > 0 ) \ - { \ - vCoRoutineAddToDelayedList( ( xTicksToDelay ), NULL ); \ - } \ - crSET_STATE0( ( xHandle ) ); +#define crDELAY( xHandle, xTicksToDelay ) \ + do { \ + if( ( xTicksToDelay ) > 0 ) \ + { \ + vCoRoutineAddToDelayedList( ( xTicksToDelay ), NULL ); \ + } \ + crSET_STATE0( ( xHandle ) ); \ + } while( 0 ) /** * @code{c} @@ -400,7 +402,7 @@ void vCoRoutineSchedule( void ); * \ingroup Tasks */ #define crQUEUE_SEND( xHandle, pxQueue, pvItemToQueue, xTicksToWait, pxResult ) \ - { \ + do { \ *( pxResult ) = xQueueCRSend( ( pxQueue ), ( pvItemToQueue ), ( xTicksToWait ) ); \ if( *( pxResult ) == errQUEUE_BLOCKED ) \ { \ @@ -412,7 +414,7 @@ void vCoRoutineSchedule( void ); crSET_STATE1( ( xHandle ) ); \ *pxResult = pdPASS; \ } \ - } + } while( 0 ) /** * croutine. h @@ -494,7 +496,7 @@ void vCoRoutineSchedule( void ); * \ingroup Tasks */ #define crQUEUE_RECEIVE( xHandle, pxQueue, pvBuffer, xTicksToWait, pxResult ) \ - { \ + do { \ *( pxResult ) = xQueueCRReceive( ( pxQueue ), ( pvBuffer ), ( xTicksToWait ) ); \ if( *( pxResult ) == errQUEUE_BLOCKED ) \ { \ @@ -506,7 +508,7 @@ void vCoRoutineSchedule( void ); crSET_STATE1( ( xHandle ) ); \ *( pxResult ) = pdPASS; \ } \ - } + } while( 0 ) /** * croutine. h diff --git a/include/semphr.h b/include/semphr.h index 7977a01b84e..740be5a5d12 100644 --- a/include/semphr.h +++ b/include/semphr.h @@ -95,13 +95,13 @@ typedef QueueHandle_t SemaphoreHandle_t; */ #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) #define vSemaphoreCreateBinary( xSemaphore ) \ - { \ + do { \ ( xSemaphore ) = xQueueGenericCreate( ( UBaseType_t ) 1, semSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_BINARY_SEMAPHORE ); \ if( ( xSemaphore ) != NULL ) \ { \ ( void ) xSemaphoreGive( ( xSemaphore ) ); \ } \ - } + } while( 0 ) #endif /** diff --git a/include/stack_macros.h b/include/stack_macros.h index 7ffc7b34338..7ead99ffd0f 100644 --- a/include/stack_macros.h +++ b/include/stack_macros.h @@ -57,13 +57,13 @@ /* Only the current stack state is to be checked. */ #define taskCHECK_FOR_STACK_OVERFLOW() \ - { \ + do { \ /* Is the currently saved stack pointer within the stack limit? */ \ if( pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack + portSTACK_LIMIT_PADDING ) \ { \ vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ } \ - } + } while( 0 ) #endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ /*-----------------------------------------------------------*/ @@ -72,14 +72,14 @@ /* Only the current stack state is to be checked. */ #define taskCHECK_FOR_STACK_OVERFLOW() \ - { \ + do { \ \ /* Is the currently saved stack pointer within the stack limit? 
*/ \ if( pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack - portSTACK_LIMIT_PADDING ) \ { \ vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ } \ - } + } while( 0 ) #endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ /*-----------------------------------------------------------*/ @@ -106,7 +106,7 @@ #if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH > 0 ) ) #define taskCHECK_FOR_STACK_OVERFLOW() \ - { \ + do { \ int8_t * pcEndOfStack = ( int8_t * ) pxCurrentTCB->pxEndOfStack; \ static const uint8_t ucExpectedStackBytes[] = { tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ @@ -122,7 +122,7 @@ { \ vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ } \ - } + } while( 0 ) #endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ /*-----------------------------------------------------------*/ diff --git a/stream_buffer.c b/stream_buffer.c index 2306b5d9b65..30093f1af8b 100644 --- a/stream_buffer.c +++ b/stream_buffer.c @@ -78,7 +78,7 @@ */ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define prvRECEIVE_COMPLETED( pxStreamBuffer ) \ - { \ + do { \ if( ( pxStreamBuffer )->pxReceiveCompletedCallback != NULL ) \ { \ ( pxStreamBuffer )->pxReceiveCompletedCallback( ( pxStreamBuffer ), pdFALSE, NULL ); \ @@ -87,7 +87,7 @@ { \ sbRECEIVE_COMPLETED( ( pxStreamBuffer ) ); \ } \ - } + } while( 0 ) #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #define prvRECEIVE_COMPLETED( pxStreamBuffer ) sbRECEIVE_COMPLETED( ( pxStreamBuffer ) ) #endif /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ @@ -116,7 +116,7 @@ #if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) #define prvRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, \ pxHigherPriorityTaskWoken ) \ - { \ + do { \ if( ( pxStreamBuffer )->pxReceiveCompletedCallback != NULL ) \ { \ ( pxStreamBuffer )->pxReceiveCompletedCallback( ( pxStreamBuffer ), pdTRUE, ( pxHigherPriorityTaskWoken ) ); \ @@ -125,7 +125,7 @@ { \ sbRECEIVE_COMPLETED_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ); \ } \ - } + } while( 0 ) #else /* if ( configUSE_SB_COMPLETED_CALLBACK == 1 ) */ #define prvRECEIVE_COMPLETED_FROM_ISR( pxStreamBuffer, pxHigherPriorityTaskWoken ) \ sbRECEIVE_COMPLETED_FROM_ISR( ( pxStreamBuffer ), ( pxHigherPriorityTaskWoken ) ) diff --git a/tasks.c b/tasks.c index 5bdfff58d62..e2861dd94aa 100644 --- a/tasks.c +++ b/tasks.c @@ -124,12 +124,12 @@ /* uxTopReadyPriority holds the priority of the highest priority ready * state task. */ #define taskRECORD_READY_PRIORITY( uxPriority ) \ - { \ + do { \ if( ( uxPriority ) > uxTopReadyPriority ) \ { \ uxTopReadyPriority = ( uxPriority ); \ } \ - } /* taskRECORD_READY_PRIORITY */ + } while( 0 ) /* taskRECORD_READY_PRIORITY */ /*-----------------------------------------------------------*/ From 77ec05e641ab602004aaa7b6e7118df8a78f4d2e Mon Sep 17 00:00:00 2001 From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com> Date: Mon, 10 Jul 2023 14:29:38 +0800 Subject: [PATCH 55/62] Update uxTaskGetSystemState for tasks in pending ready list (#702) * Update uxTaskGetSystemState to sync with eTaskGetState * Update in vTaskGetInfo for tasks in pending ready list should be in ready state. 
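To illustrate the behaviour being aligned by this patch: a task that is readied while the scheduler is suspended is parked on the pending ready list until xTaskResumeAll() runs, and with this change the state-query APIs report such a task as ready. The fragment below is only a sketch of that observation — the function and handle names are invented for illustration, and it assumes configUSE_TRACE_FACILITY is enabled so that vTaskGetInfo() is available.

```c
#include "FreeRTOS.h"
#include "task.h"

/* Sketch only: xWorkerHandle is a hypothetical handle to a task that was
 * unblocked (for example by a queue send) while the scheduler was suspended,
 * so it is currently held on the pending ready list. */
static void prvCheckWorkerState( TaskHandle_t xWorkerHandle )
{
    TaskStatus_t xStatus;

    /* vTaskGetInfo() requires configUSE_TRACE_FACILITY to be set to 1. */
    vTaskGetInfo( xWorkerHandle, /* The task to query. */
                  &xStatus,      /* Structure to populate. */
                  pdFALSE,       /* Skip the stack high-water-mark calculation. */
                  eInvalid );    /* Ask vTaskGetInfo() to determine the state itself. */

    /* With this patch applied, xStatus.eCurrentState reads back as eReady for
     * a task held on the pending ready list, matching what eTaskGetState()
     * already reported for the same task. */
    configASSERT( xStatus.eCurrentState == eReady );
}
```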
--- tasks.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tasks.c b/tasks.c index e2861dd94aa..ed3cd1ee2ed 100644 --- a/tasks.c +++ b/tasks.c @@ -3843,6 +3843,18 @@ static void prvCheckTasksWaitingTermination( void ) } } #endif /* INCLUDE_vTaskSuspend */ + + /* Tasks can be in pending ready list and other state list at the + * same time. These tasks are in ready state no matter what state + * list the task is in. */ + taskENTER_CRITICAL(); + { + if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) != pdFALSE ) + { + pxTaskStatus->eCurrentState = eReady; + } + } + taskEXIT_CRITICAL(); } } else From 18e293723929da31c0395fb4c7f9187474058a6a Mon Sep 17 00:00:00 2001 From: Patrick Cook <114708437+cookpate@users.noreply.github.com> Date: Mon, 10 Jul 2023 15:08:59 -0700 Subject: [PATCH 56/62] Fix circular dependency in CMake project (#700) * Fix circular dependency in cmake project Fix for https://github.com/FreeRTOS/FreeRTOS-Kernel/issues/687 In order for custom ports to also break the cycle, they must link against freertos_kernel_include instead of freertos_kernel. * Simplify include path --- CMakeLists.txt | 12 +++--------- include/CMakeLists.txt | 15 +++++++++++++++ portable/CMakeLists.txt | 2 +- 3 files changed, 19 insertions(+), 10 deletions(-) create mode 100644 include/CMakeLists.txt diff --git a/CMakeLists.txt b/CMakeLists.txt index 46a9e180796..a68b652cada 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -225,7 +225,7 @@ elseif((FREERTOS_PORT STREQUAL "A_CUSTOM_PORT") AND (NOT TARGET freertos_kernel_ " .)\n" " target_link_libraries(freertos_kernel_port\n" " PRIVATE\n" - " freertos_kernel)") + " freertos_kernel_include)") endif() ######################################################################## @@ -264,6 +264,7 @@ add_compile_options( ######################################################################## +add_subdirectory(include) add_subdirectory(portable) add_library(freertos_kernel STATIC @@ -279,17 +280,10 @@ add_library(freertos_kernel STATIC $>,${FREERTOS_HEAP},portable/MemMang/heap_${FREERTOS_HEAP}.c> ) -target_include_directories(freertos_kernel - PUBLIC - include - # Note: DEPRECATED but still supported, may be removed in a future release. - $<$>:${FREERTOS_CONFIG_FILE_DIRECTORY}> -) - target_link_libraries(freertos_kernel PUBLIC - $<$:freertos_config> freertos_kernel_port + freertos_kernel_include ) ######################################################################## diff --git a/include/CMakeLists.txt b/include/CMakeLists.txt new file mode 100644 index 00000000000..46a1c3e7f5f --- /dev/null +++ b/include/CMakeLists.txt @@ -0,0 +1,15 @@ +# FreeRTOS internal cmake file. Do not use it in user top-level project + +add_library(freertos_kernel_include INTERFACE) + +target_include_directories(freertos_kernel_include + INTERFACE + . + # Note: DEPRECATED but still supported, may be removed in a future release. 
+ $<$>:${FREERTOS_CONFIG_FILE_DIRECTORY}> +) + +target_link_libraries(freertos_kernel_include + INTERFACE + $<$:freertos_config> +) diff --git a/portable/CMakeLists.txt b/portable/CMakeLists.txt index 2824df05dc0..988be152406 100644 --- a/portable/CMakeLists.txt +++ b/portable/CMakeLists.txt @@ -1035,7 +1035,7 @@ target_link_libraries(freertos_kernel_port $<$:pico_base_headers> $<$:idf::esp32> PRIVATE - freertos_kernel + freertos_kernel_include $<$:Threads::Threads> "$<$:hardware_clocks;hardware_exception>" $<$:winmm> # Windows library which implements timers From 97050a17aa8ad5fd7a7789874b38083dbce1fb0b Mon Sep 17 00:00:00 2001 From: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> Date: Thu, 13 Jul 2023 16:51:04 +0530 Subject: [PATCH 57/62] Memory Protection Unit (MPU) Enhancements (#705) Memory Protection Unit (MPU) Enhancements This commit introduces a new MPU wrapper that places additional restrictions on unprivileged tasks. The following is the list of changes introduced with the new MPU wrapper: 1. Opaque and indirectly verifiable integers for kernel object handles: All the kernel object handles (for example, queue handles) are now opaque integers. Previously object handles were raw pointers. 2. Saving the task context in Task Control Block (TCB): When a task is swapped out by the scheduler, the task's context is now saved in its TCB. Previously the task's context was saved on its stack. 3. Execute system calls on a separate privileged only stack: FreeRTOS system calls, which execute with elevated privilege, now use a separate privileged only stack. Previously system calls used the calling task's stack. The application writer can control the size of the system call stack using new configSYSTEM_CALL_STACK_SIZE config macro. 4. Memory bounds checks: FreeRTOS system calls which accept a pointer and de-reference it, now verify that the calling task has required permissions to access the memory location referenced by the pointer. 5. System call restrictions: The following system calls are no longer available to unprivileged tasks: - vQueueDelete - xQueueCreateMutex - xQueueCreateMutexStatic - xQueueCreateCountingSemaphore - xQueueCreateCountingSemaphoreStatic - xQueueGenericCreate - xQueueGenericCreateStatic - xQueueCreateSet - xQueueRemoveFromSet - xQueueGenericReset - xTaskCreate - xTaskCreateStatic - vTaskDelete - vTaskPrioritySet - vTaskSuspendAll - xTaskResumeAll - xTaskGetHandle - xTaskCallApplicationTaskHook - vTaskList - vTaskGetRunTimeStats - xTaskCatchUpTicks - xEventGroupCreate - xEventGroupCreateStatic - vEventGroupDelete - xStreamBufferGenericCreate - xStreamBufferGenericCreateStatic - vStreamBufferDelete - xStreamBufferReset Also, an unprivileged task can no longer use vTaskSuspend to suspend any task other than itself. We thank the following people for their inputs in these enhancements: - David Reiss of Meta Platforms, Inc. - Lan Luo, Xinhui Shao, Yumeng Wei, Zixia Liu, Huaiyu Yan and Zhen Ling of School of Computer Science and Engineering, Southeast University, China. - Xinwen Fu of Department of Computer Science, University of Massachusetts Lowell, USA. - Yuequi Chen, Zicheng Wang, Minghao Lin of University of Colorado Boulder, USA. 
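A note on point 1 above: the opaque handles can be pictured as indices into a privileged table of kernel object pointers, so a forged or corrupted handle can be range-checked before anything is dereferenced. The fragment below is a conceptual sketch only; every name in it is invented for illustration, and the shipped implementation in portable/Common/mpu_wrappers_v2.c differs in detail.

#include "FreeRTOS.h"

#define sketchPOOL_SIZE    ( 16U )

/* Real object pointers live in privileged data; unprivileged code only ever
 * holds an index into this table. */
PRIVILEGED_DATA static void * pvKernelObjectPool[ sketchPOOL_SIZE ];

/* Translate an opaque handle back to the underlying object, returning NULL
 * for any value the kernel could not have issued.  Index 0 is reserved so
 * that a zero handle is always invalid. */
static void * prvHandleToObject( UBaseType_t uxHandle )
{
    void * pvObject = NULL;

    if( ( uxHandle >= 1U ) && ( uxHandle <= sketchPOOL_SIZE ) )
    {
        pvObject = pvKernelObjectPool[ uxHandle - 1U ];
    }

    return pvObject;
}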
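Two configuration macros accompany this work: configUSE_MPU_WRAPPERS_V1, which defaults to 0 and selects the new wrapper (set it to 1 to keep using MPU wrappers v1), and configSYSTEM_CALL_STACK_SIZE, which sizes the privileged-only system call stack described in point 3. An illustrative FreeRTOSConfig.h fragment follows; the numeric value is a placeholder rather than a recommendation, and sizing guidance comes from the individual port documentation.

/* Enable the MPU port and the new (v2) MPU wrapper. */
#define configENABLE_MPU                 1
#define configUSE_MPU_WRAPPERS_V1        0

/* Depth of the privileged-only system call stack; the value used here is an
 * arbitrary example. */
#define configSYSTEM_CALL_STACK_SIZE     128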
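Point 4 builds on a set of small helpers added to the MPU ports later in this patch, among them portEXTRACT_FIRST_ADDRESS_FROM_RBAR(), portEXTRACT_LAST_ADDRESS_FROM_RLAR(), portIS_ADDRESS_WITHIN_RANGE(), portIS_AUTHORIZED(), portADD_UINT32_WILL_OVERFLOW() and prvGetRegionAccessPermissions(). The sketch below shows how they compose for a single MPU region; it is illustrative only (the function name and parameter list are invented here), whereas the shipped xPortIsAuthorizedToAccessBuffer() declared in portable.h walks every region recorded in the task's xMPU_SETTINGS and also covers cases this sketch ignores, such as privileged callers.

/* Does a buffer of ulBufferLength bytes starting at ulBufferStart lie
 * entirely inside the region described by ulRBARValue/ulRLARValue, with the
 * requested access?  Assumes a non-zero ulBufferLength. */
static BaseType_t prvBufferLiesWithinRegion( uint32_t ulRBARValue,
                                             uint32_t ulRLARValue,
                                             uint32_t ulBufferStart,
                                             uint32_t ulBufferLength,
                                             uint32_t ulAccessRequested )
{
    BaseType_t xAccessGranted = pdFALSE;
    uint32_t ulBufferEnd, ulRegionStart, ulRegionEnd, ulPermissions;

    /* Reject buffers whose last byte address would wrap past 0xFFFFFFFF. */
    if( portADD_UINT32_WILL_OVERFLOW( ulBufferStart, ulBufferLength - 1UL ) == pdFALSE )
    {
        ulBufferEnd = ulBufferStart + ulBufferLength - 1UL;
        ulRegionStart = portEXTRACT_FIRST_ADDRESS_FROM_RBAR( ulRBARValue );
        ulRegionEnd = portEXTRACT_LAST_ADDRESS_FROM_RLAR( ulRLARValue );

        if( ( portIS_ADDRESS_WITHIN_RANGE( ulBufferStart, ulRegionStart, ulRegionEnd ) ) &&
            ( portIS_ADDRESS_WITHIN_RANGE( ulBufferEnd, ulRegionStart, ulRegionEnd ) ) )
        {
            /* The region covers the buffer; now check that its access
             * permissions include what the caller asked for, e.g.
             * tskMPU_READ_PERMISSION or tskMPU_WRITE_PERMISSION. */
            ulPermissions = prvGetRegionAccessPermissions( ulRBARValue );

            if( portIS_AUTHORIZED( ulAccessRequested, ulPermissions ) )
            {
                xAccessGranted = pdTRUE;
            }
        }
    }

    return xAccessGranted;
}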
--- .github/lexicon.txt | 1 + include/FreeRTOS.h | 5 + include/mpu_prototypes.h | 241 +- include/mpu_wrappers.h | 232 +- include/portable.h | 24 +- include/queue.h | 2 +- include/task.h | 13 + portable/ARMv8M/copy_files.py | 12 + portable/ARMv8M/non_secure/port.c | 726 ++- .../GCC/ARM_CM23/mpu_wrappers_v2_asm.c | 2419 ++++++++++ .../portable/GCC/ARM_CM23/portasm.c | 558 ++- .../GCC/ARM_CM23_NTZ/mpu_wrappers_v2_asm.c | 2419 ++++++++++ .../portable/GCC/ARM_CM23_NTZ/portasm.c | 481 +- .../GCC/ARM_CM33/mpu_wrappers_v2_asm.c | 2349 ++++++++++ .../portable/GCC/ARM_CM33/portasm.c | 528 ++- .../GCC/ARM_CM33_NTZ/mpu_wrappers_v2_asm.c | 2349 ++++++++++ .../portable/GCC/ARM_CM33_NTZ/portasm.c | 412 +- .../IAR/ARM_CM23/mpu_wrappers_v2_asm.S | 1623 +++++++ .../portable/IAR/ARM_CM23/portasm.s | 425 +- .../IAR/ARM_CM23_NTZ/mpu_wrappers_v2_asm.S | 1623 +++++++ .../portable/IAR/ARM_CM23_NTZ/portasm.s | 374 +- .../IAR/ARM_CM33/mpu_wrappers_v2_asm.S | 1552 +++++++ .../portable/IAR/ARM_CM33/portasm.s | 359 +- .../IAR/ARM_CM33_NTZ/mpu_wrappers_v2_asm.S | 1552 +++++++ .../portable/IAR/ARM_CM33_NTZ/portasm.s | 302 +- portable/ARMv8M/non_secure/portmacrocommon.h | 146 +- portable/Common/mpu_wrappers.c | 4 +- portable/Common/mpu_wrappers_v2.c | 4121 +++++++++++++++++ .../ARM_CM23/non_secure/mpu_wrappers_v2_asm.c | 2419 ++++++++++ portable/GCC/ARM_CM23/non_secure/port.c | 726 ++- portable/GCC/ARM_CM23/non_secure/portasm.c | 558 ++- .../GCC/ARM_CM23/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.c | 2419 ++++++++++ portable/GCC/ARM_CM23_NTZ/non_secure/port.c | 726 ++- .../GCC/ARM_CM23_NTZ/non_secure/portasm.c | 481 +- .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 146 +- .../ARM_CM33/non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM33/non_secure/port.c | 726 ++- portable/GCC/ARM_CM33/non_secure/portasm.c | 528 ++- .../GCC/ARM_CM33/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM33_NTZ/non_secure/port.c | 726 ++- .../GCC/ARM_CM33_NTZ/non_secure/portasm.c | 412 +- .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM35P/non_secure/port.c | 726 ++- portable/GCC/ARM_CM35P/non_secure/portasm.c | 528 ++- .../ARM_CM35P/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM35P_NTZ/non_secure/port.c | 726 ++- .../GCC/ARM_CM35P_NTZ/non_secure/portasm.c | 412 +- .../non_secure/portmacrocommon.h | 146 +- .../GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM3_MPU/port.c | 668 ++- portable/GCC/ARM_CM3_MPU/portmacro.h | 56 +- .../GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM4_MPU/port.c | 794 +++- portable/GCC/ARM_CM4_MPU/portmacro.h | 55 +- .../ARM_CM55/non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM55/non_secure/port.c | 726 ++- portable/GCC/ARM_CM55/non_secure/portasm.c | 528 ++- .../GCC/ARM_CM55/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM55_NTZ/non_secure/port.c | 726 ++- .../GCC/ARM_CM55_NTZ/non_secure/portasm.c | 412 +- .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 146 +- .../ARM_CM85/non_secure/mpu_wrappers_v2_asm.c | 2349 ++++++++++ portable/GCC/ARM_CM85/non_secure/port.c | 726 ++- portable/GCC/ARM_CM85/non_secure/portasm.c | 528 ++- .../GCC/ARM_CM85/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.c | 2349 
++++++++++ portable/GCC/ARM_CM85_NTZ/non_secure/port.c | 726 ++- .../GCC/ARM_CM85_NTZ/non_secure/portasm.c | 412 +- .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 146 +- .../ARM_CM23/non_secure/mpu_wrappers_v2_asm.S | 1623 +++++++ portable/IAR/ARM_CM23/non_secure/port.c | 726 ++- portable/IAR/ARM_CM23/non_secure/portasm.s | 425 +- .../IAR/ARM_CM23/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.S | 1623 +++++++ portable/IAR/ARM_CM23_NTZ/non_secure/port.c | 726 ++- .../IAR/ARM_CM23_NTZ/non_secure/portasm.s | 374 +- .../ARM_CM23_NTZ/non_secure/portmacrocommon.h | 146 +- .../ARM_CM33/non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM33/non_secure/port.c | 726 ++- portable/IAR/ARM_CM33/non_secure/portasm.s | 359 +- .../IAR/ARM_CM33/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM33_NTZ/non_secure/port.c | 726 ++- .../IAR/ARM_CM33_NTZ/non_secure/portasm.s | 302 +- .../ARM_CM33_NTZ/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM35P/non_secure/port.c | 726 ++- portable/IAR/ARM_CM35P/non_secure/portasm.s | 359 +- .../ARM_CM35P/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM35P_NTZ/non_secure/port.c | 726 ++- .../IAR/ARM_CM35P_NTZ/non_secure/portasm.s | 302 +- .../non_secure/portmacrocommon.h | 146 +- .../IAR/ARM_CM4F_MPU/mpu_wrappers_v2_asm.S | 1556 +++++++ portable/IAR/ARM_CM4F_MPU/port.c | 519 ++- portable/IAR/ARM_CM4F_MPU/portasm.s | 281 +- portable/IAR/ARM_CM4F_MPU/portmacro.h | 55 +- .../ARM_CM55/non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM55/non_secure/port.c | 726 ++- portable/IAR/ARM_CM55/non_secure/portasm.s | 359 +- .../IAR/ARM_CM55/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM55_NTZ/non_secure/port.c | 726 ++- .../IAR/ARM_CM55_NTZ/non_secure/portasm.s | 302 +- .../ARM_CM55_NTZ/non_secure/portmacrocommon.h | 146 +- .../ARM_CM85/non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM85/non_secure/port.c | 726 ++- portable/IAR/ARM_CM85/non_secure/portasm.s | 359 +- .../IAR/ARM_CM85/non_secure/portmacrocommon.h | 146 +- .../non_secure/mpu_wrappers_v2_asm.S | 1552 +++++++ portable/IAR/ARM_CM85_NTZ/non_secure/port.c | 726 ++- .../IAR/ARM_CM85_NTZ/non_secure/portasm.s | 302 +- .../ARM_CM85_NTZ/non_secure/portmacrocommon.h | 146 +- .../RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c | 1993 ++++++++ portable/RVDS/ARM_CM4_MPU/port.c | 756 ++- portable/RVDS/ARM_CM4_MPU/portmacro.h | 55 +- queue.c | 6 + tasks.c | 21 +- 123 files changed, 94813 insertions(+), 6526 deletions(-) create mode 100644 portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/mpu_wrappers_v2_asm.c create mode 100644 portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/mpu_wrappers_v2_asm.c create mode 100644 portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/mpu_wrappers_v2_asm.c create mode 100644 portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/mpu_wrappers_v2_asm.c create mode 100644 portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/mpu_wrappers_v2_asm.S create mode 100644 portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/mpu_wrappers_v2_asm.S create mode 100644 portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/mpu_wrappers_v2_asm.S create mode 100644 portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/mpu_wrappers_v2_asm.S create mode 100644 portable/Common/mpu_wrappers_v2.c create mode 100644 
portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c create mode 100644 portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM4F_MPU/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S create mode 100644 portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c diff --git a/.github/lexicon.txt b/.github/lexicon.txt index 1a7d4852dc2..ec6577e9e62 100644 --- a/.github/lexicon.txt +++ b/.github/lexicon.txt @@ -2468,6 +2468,7 @@ uxportcomparesetextram uxpriority uxprioritytouse uxqueue +uxqueuegetqueueitemsize uxqueuelength uxqueuemessageswaiting uxqueuespacesavailable diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index b7d50541cbf..831ed037b1f 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -81,6 +81,11 @@ #endif #endif +/* Set configUSE_MPU_WRAPPERS_V1 to 1 to use MPU wrappers v1. */ +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + /* Basic FreeRTOS definitions. */ #include "projdefs.h" diff --git a/include/mpu_prototypes.h b/include/mpu_prototypes.h index 08fa0519317..633efd4a817 100644 --- a/include/mpu_prototypes.h +++ b/include/mpu_prototypes.h @@ -39,20 +39,6 @@ #define MPU_PROTOTYPES_H /* MPU versions of task.h API functions. 
*/ -BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode, - const char * const pcName, - const uint16_t usStackDepth, - void * const pvParameters, - UBaseType_t uxPriority, - TaskHandle_t * const pxCreatedTask ) FREERTOS_SYSTEM_CALL; -TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode, - const char * const pcName, - const uint32_t ulStackDepth, - void * const pvParameters, - UBaseType_t uxPriority, - StackType_t * const puxStackBuffer, - StaticTask_t * const pxTaskBuffer ) FREERTOS_SYSTEM_CALL; -void MPU_vTaskDelete( TaskHandle_t xTaskToDelete ) FREERTOS_SYSTEM_CALL; void MPU_vTaskDelay( const TickType_t xTicksToDelay ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement ) FREERTOS_SYSTEM_CALL; @@ -63,17 +49,11 @@ void MPU_vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t * pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState ) FREERTOS_SYSTEM_CALL; -void MPU_vTaskPrioritySet( TaskHandle_t xTask, - UBaseType_t uxNewPriority ) FREERTOS_SYSTEM_CALL; void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) FREERTOS_SYSTEM_CALL; void MPU_vTaskResume( TaskHandle_t xTaskToResume ) FREERTOS_SYSTEM_CALL; -void MPU_vTaskStartScheduler( void ) FREERTOS_SYSTEM_CALL; -void MPU_vTaskSuspendAll( void ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xTaskResumeAll( void ) FREERTOS_SYSTEM_CALL; TickType_t MPU_xTaskGetTickCount( void ) FREERTOS_SYSTEM_CALL; UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) FREERTOS_SYSTEM_CALL; char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) FREERTOS_SYSTEM_CALL; -TaskHandle_t MPU_xTaskGetHandle( const char * pcNameToQuery ) FREERTOS_SYSTEM_CALL; UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, @@ -84,16 +64,14 @@ void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, void * pvValue ) FREERTOS_SYSTEM_CALL; void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask, - void * pvParameter ) FREERTOS_SYSTEM_CALL; TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) FREERTOS_SYSTEM_CALL; UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) FREERTOS_SYSTEM_CALL; +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) FREERTOS_SYSTEM_CALL; configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) FREERTOS_SYSTEM_CALL; -void MPU_vTaskList( char * pcWriteBuffer ) FREERTOS_SYSTEM_CALL; -void MPU_vTaskGetRunTimeStats( char * pcWriteBuffer ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, @@ -112,14 +90,55 @@ BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, UBaseType_t uxIndexToClear, uint32_t ulBitsToClear ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xTaskIncrementTick( void ) FREERTOS_SYSTEM_CALL; -TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL; void 
MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait ) FREERTOS_SYSTEM_CALL; -void MPU_vTaskMissedYield( void ) FREERTOS_SYSTEM_CALL; +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xTaskGetSchedulerState( void ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) FREERTOS_SYSTEM_CALL; + +/* Privileged only wrappers for Task APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ +BaseType_t MPU_xTaskCreate( TaskFunction_t pxTaskCode, + const char * const pcName, + const uint16_t usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION; +TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION; +void MPU_vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION; +void MPU_vTaskPrioritySet( TaskHandle_t xTask, + UBaseType_t uxNewPriority ) PRIVILEGED_FUNCTION; +TaskHandle_t MPU_xTaskGetHandle( const char * pcNameToQuery ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask, + void * pvParameter ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, + TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, + TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; +void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, + const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xTaskGetStaticBuffers( TaskHandle_t xTask, + StackType_t ** ppuxStackBuffer, + StaticTask_t ** ppxTaskBuffer ) PRIVILEGED_FUNCTION; +UBaseType_t MPU_uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; +TaskHookFunction_t MPU_xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue, + BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +void MPU_vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; /* MPU versions of queue.h API functions. 
*/ BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, @@ -136,15 +155,6 @@ BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; -void MPU_vQueueDelete( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; -QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL; -QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType, - StaticQueue_t * pxStaticQueue ) FREERTOS_SYSTEM_CALL; -QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, - const UBaseType_t uxInitialCount ) FREERTOS_SYSTEM_CALL; -QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, - const UBaseType_t uxInitialCount, - StaticQueue_t * pxStaticQueue ) FREERTOS_SYSTEM_CALL; TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; @@ -153,65 +163,97 @@ void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, const char * pcName ) FREERTOS_SYSTEM_CALL; void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; -QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength, - const UBaseType_t uxItemSize, - const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL; -QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, - const UBaseType_t uxItemSize, - uint8_t * pucQueueStorage, - StaticQueue_t * pxStaticQueue, - const uint8_t ucQueueType ) FREERTOS_SYSTEM_CALL; -QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, - QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL; QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue, - BaseType_t xNewQueue ) FREERTOS_SYSTEM_CALL; void MPU_vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) FREERTOS_SYSTEM_CALL; UBaseType_t MPU_uxQueueGetQueueNumber( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; uint8_t MPU_ucQueueGetQueueType( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; +/* Privileged only wrappers for Queue APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. 
*/ +void MPU_vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType, + StaticQueue_t * pxStaticQueue ) PRIVILEGED_FUNCTION; +QueueHandle_t MPU_xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, + const UBaseType_t uxInitialCount ) PRIVILEGED_FUNCTION; +QueueHandle_t MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, + const UBaseType_t uxInitialCount, + StaticQueue_t * pxStaticQueue ) PRIVILEGED_FUNCTION; +QueueHandle_t MPU_xQueueGenericCreate( const UBaseType_t uxQueueLength, + const UBaseType_t uxItemSize, + const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, + const UBaseType_t uxItemSize, + uint8_t * pucQueueStorage, + StaticQueue_t * pxStaticQueue, + const uint8_t ucQueueType ) PRIVILEGED_FUNCTION; +QueueSetHandle_t MPU_xQueueCreateSet( const UBaseType_t uxEventQueueLength ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue, + BaseType_t xNewQueue ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueGenericGetStaticBuffers( QueueHandle_t xQueue, + uint8_t ** ppucQueueStorage, + StaticQueue_t ** ppxStaticQueue ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueGenericSendFromISR( QueueHandle_t xQueue, + const void * const pvItemToQueue, + BaseType_t * const pxHigherPriorityTaskWoken, + const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueGiveFromISR( QueueHandle_t xQueue, + BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueuePeekFromISR( QueueHandle_t xQueue, + void * const pvBuffer ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueReceiveFromISR( QueueHandle_t xQueue, + void * const pvBuffer, + BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +UBaseType_t MPU_uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; +TaskHandle_t MPU_xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION; +QueueSetMemberHandle_t MPU_xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; + /* MPU versions of timers.h API functions. 
*/ -TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName, - const TickType_t xTimerPeriodInTicks, - const UBaseType_t uxAutoReload, - void * const pvTimerID, - TimerCallbackFunction_t pxCallbackFunction ) FREERTOS_SYSTEM_CALL; -TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName, - const TickType_t xTimerPeriodInTicks, - const UBaseType_t uxAutoReload, - void * const pvTimerID, - TimerCallbackFunction_t pxCallbackFunction, - StaticTimer_t * pxTimerBuffer ) FREERTOS_SYSTEM_CALL; void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; void MPU_vTimerSetTimerID( TimerHandle_t xTimer, void * pvNewID ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xTimerPendFunctionCall( PendedFunction_t xFunctionToPend, - void * pvParameter1, - uint32_t ulParameter2, - TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, const UBaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL; +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xTimerCreateTimerTask( void ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, - const BaseType_t xCommandID, - const TickType_t xOptionalValue, - BaseType_t * const pxHigherPriorityTaskWoken, - const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +/* Privileged only wrappers for Timer APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ +TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName, + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction ) PRIVILEGED_FUNCTION; +TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName, + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + StaticTimer_t * pxTimerBuffer ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xTimerGetStaticBuffer( TimerHandle_t xTimer, + StaticTimer_t ** ppxTimerBuffer ) PRIVILEGED_FUNCTION; /* MPU versions of event_group.h API functions. 
*/ -EventGroupHandle_t MPU_xEventGroupCreate( void ) FREERTOS_SYSTEM_CALL; -EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer ) FREERTOS_SYSTEM_CALL; EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, @@ -225,8 +267,26 @@ EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; -void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) FREERTOS_SYSTEM_CALL; -UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) FREERTOS_SYSTEM_CALL; +#if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) FREERTOS_SYSTEM_CALL; + void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) FREERTOS_SYSTEM_CALL; +#endif /* ( configUSE_TRACE_FACILITY == 1 )*/ + +/* Privileged only wrappers for Event Group APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ +EventGroupHandle_t MPU_xEventGroupCreate( void ) PRIVILEGED_FUNCTION; +EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer ) PRIVILEGED_FUNCTION; +void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup, + StaticEventGroup_t ** ppxEventGroupBuffer ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +EventBits_t MPU_xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION; /* MPU versions of message/stream_buffer.h API functions. */ size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, @@ -237,28 +297,45 @@ size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, void * pvRxData, size_t xBufferLengthBytes, TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; -size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; -void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; -BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, size_t xTriggerLevel ) FREERTOS_SYSTEM_CALL; +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; + +/* Privileged only wrappers for Stream Buffer APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. 
*/ StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer, StreamBufferCallbackFunction_t pxSendCompletedCallback, - StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) FREERTOS_SYSTEM_CALL; + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION; StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, size_t xTriggerLevelBytes, BaseType_t xIsMessageBuffer, uint8_t * const pucStreamBufferStorageArea, StaticStreamBuffer_t * const pxStaticStreamBuffer, StreamBufferCallbackFunction_t pxSendCompletedCallback, - StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) FREERTOS_SYSTEM_CALL; - - + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) PRIVILEGED_FUNCTION; +void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffers, + uint8_t * ppucStreamBufferStorageArea, + StaticStreamBuffer_t * ppxStaticStreamBuffer ) PRIVILEGED_FUNCTION; +size_t MPU_xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +size_t MPU_xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, + BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +BaseType_t MPU_xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, + BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; #endif /* MPU_PROTOTYPES_H */ diff --git a/include/mpu_wrappers.h b/include/mpu_wrappers.h index fb8aedfee81..020efc3ef68 100644 --- a/include/mpu_wrappers.h +++ b/include/mpu_wrappers.h @@ -47,114 +47,184 @@ */ /* Map standard task.h API functions to the MPU equivalents. 
*/ - #define xTaskCreate MPU_xTaskCreate - #define xTaskCreateStatic MPU_xTaskCreateStatic - #define vTaskDelete MPU_vTaskDelete - #define vTaskDelay MPU_vTaskDelay - #define xTaskDelayUntil MPU_xTaskDelayUntil - #define xTaskAbortDelay MPU_xTaskAbortDelay - #define uxTaskPriorityGet MPU_uxTaskPriorityGet - #define eTaskGetState MPU_eTaskGetState - #define vTaskGetInfo MPU_vTaskGetInfo - #define vTaskPrioritySet MPU_vTaskPrioritySet - #define vTaskSuspend MPU_vTaskSuspend - #define vTaskResume MPU_vTaskResume - #define vTaskSuspendAll MPU_vTaskSuspendAll - #define xTaskResumeAll MPU_xTaskResumeAll - #define xTaskGetTickCount MPU_xTaskGetTickCount - #define uxTaskGetNumberOfTasks MPU_uxTaskGetNumberOfTasks - #define pcTaskGetName MPU_pcTaskGetName - #define xTaskGetHandle MPU_xTaskGetHandle - #define uxTaskGetStackHighWaterMark MPU_uxTaskGetStackHighWaterMark - #define uxTaskGetStackHighWaterMark2 MPU_uxTaskGetStackHighWaterMark2 - #define vTaskSetApplicationTaskTag MPU_vTaskSetApplicationTaskTag - #define xTaskGetApplicationTaskTag MPU_xTaskGetApplicationTaskTag - #define vTaskSetThreadLocalStoragePointer MPU_vTaskSetThreadLocalStoragePointer - #define pvTaskGetThreadLocalStoragePointer MPU_pvTaskGetThreadLocalStoragePointer - #define xTaskCallApplicationTaskHook MPU_xTaskCallApplicationTaskHook - #define xTaskGetIdleTaskHandle MPU_xTaskGetIdleTaskHandle - #define uxTaskGetSystemState MPU_uxTaskGetSystemState - #define vTaskList MPU_vTaskList - #define vTaskGetRunTimeStats MPU_vTaskGetRunTimeStats - #define ulTaskGetIdleRunTimeCounter MPU_ulTaskGetIdleRunTimeCounter - #define ulTaskGetIdleRunTimePercent MPU_ulTaskGetIdleRunTimePercent - #define xTaskGenericNotify MPU_xTaskGenericNotify - #define xTaskGenericNotifyWait MPU_xTaskGenericNotifyWait - #define ulTaskGenericNotifyTake MPU_ulTaskGenericNotifyTake - #define xTaskGenericNotifyStateClear MPU_xTaskGenericNotifyStateClear - #define ulTaskGenericNotifyValueClear MPU_ulTaskGenericNotifyValueClear - #define xTaskCatchUpTicks MPU_xTaskCatchUpTicks - - #define xTaskGetCurrentTaskHandle MPU_xTaskGetCurrentTaskHandle - #define vTaskSetTimeOutState MPU_vTaskSetTimeOutState - #define xTaskCheckForTimeOut MPU_xTaskCheckForTimeOut - #define xTaskGetSchedulerState MPU_xTaskGetSchedulerState + #define vTaskDelay MPU_vTaskDelay + #define xTaskDelayUntil MPU_xTaskDelayUntil + #define xTaskAbortDelay MPU_xTaskAbortDelay + #define uxTaskPriorityGet MPU_uxTaskPriorityGet + #define eTaskGetState MPU_eTaskGetState + #define vTaskGetInfo MPU_vTaskGetInfo + #define vTaskSuspend MPU_vTaskSuspend + #define vTaskResume MPU_vTaskResume + #define xTaskGetTickCount MPU_xTaskGetTickCount + #define uxTaskGetNumberOfTasks MPU_uxTaskGetNumberOfTasks + #define pcTaskGetName MPU_pcTaskGetName + #define uxTaskGetStackHighWaterMark MPU_uxTaskGetStackHighWaterMark + #define uxTaskGetStackHighWaterMark2 MPU_uxTaskGetStackHighWaterMark2 + #define vTaskSetApplicationTaskTag MPU_vTaskSetApplicationTaskTag + #define xTaskGetApplicationTaskTag MPU_xTaskGetApplicationTaskTag + #define vTaskSetThreadLocalStoragePointer MPU_vTaskSetThreadLocalStoragePointer + #define pvTaskGetThreadLocalStoragePointer MPU_pvTaskGetThreadLocalStoragePointer + #define xTaskGetIdleTaskHandle MPU_xTaskGetIdleTaskHandle + #define uxTaskGetSystemState MPU_uxTaskGetSystemState + #define ulTaskGetIdleRunTimeCounter MPU_ulTaskGetIdleRunTimeCounter + #define ulTaskGetIdleRunTimePercent MPU_ulTaskGetIdleRunTimePercent + #define xTaskGenericNotify MPU_xTaskGenericNotify + #define xTaskGenericNotifyWait 
MPU_xTaskGenericNotifyWait + #define ulTaskGenericNotifyTake MPU_ulTaskGenericNotifyTake + #define xTaskGenericNotifyStateClear MPU_xTaskGenericNotifyStateClear + #define ulTaskGenericNotifyValueClear MPU_ulTaskGenericNotifyValueClear + #define vTaskSetTimeOutState MPU_vTaskSetTimeOutState + #define xTaskCheckForTimeOut MPU_xTaskCheckForTimeOut + #define xTaskGetCurrentTaskHandle MPU_xTaskGetCurrentTaskHandle + #define xTaskGetSchedulerState MPU_xTaskGetSchedulerState + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + #define ulTaskGetRunTimeCounter MPU_ulTaskGetRunTimeCounter + #define ulTaskGetRunTimePercent MPU_ulTaskGetRunTimePercent + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +/* Privileged only wrappers for Task APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ + #define xTaskCreate MPU_xTaskCreate + #define xTaskCreateStatic MPU_xTaskCreateStatic + #define vTaskDelete MPU_vTaskDelete + #define vTaskPrioritySet MPU_vTaskPrioritySet + #define xTaskGetHandle MPU_xTaskGetHandle + #define xTaskCallApplicationTaskHook MPU_xTaskCallApplicationTaskHook + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + #define xTaskCreateRestricted MPU_xTaskCreateRestricted + #define xTaskCreateRestrictedStatic MPU_xTaskCreateRestrictedStatic + #define vTaskAllocateMPURegions MPU_vTaskAllocateMPURegions + #define xTaskGetStaticBuffers MPU_xTaskGetStaticBuffers + #define uxTaskPriorityGetFromISR MPU_uxTaskPriorityGetFromISR + #define xTaskResumeFromISR MPU_xTaskResumeFromISR + #define xTaskGetApplicationTaskTagFromISR MPU_xTaskGetApplicationTaskTagFromISR + #define xTaskGenericNotifyFromISR MPU_xTaskGenericNotifyFromISR + #define vTaskGenericNotifyGiveFromISR MPU_vTaskGenericNotifyGiveFromISR + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /* Map standard queue.h API functions to the MPU equivalents. */ - #define xQueueGenericSend MPU_xQueueGenericSend - #define xQueueReceive MPU_xQueueReceive - #define xQueuePeek MPU_xQueuePeek - #define xQueueSemaphoreTake MPU_xQueueSemaphoreTake - #define uxQueueMessagesWaiting MPU_uxQueueMessagesWaiting - #define uxQueueSpacesAvailable MPU_uxQueueSpacesAvailable + #define xQueueGenericSend MPU_xQueueGenericSend + #define xQueueReceive MPU_xQueueReceive + #define xQueuePeek MPU_xQueuePeek + #define xQueueSemaphoreTake MPU_xQueueSemaphoreTake + #define uxQueueMessagesWaiting MPU_uxQueueMessagesWaiting + #define uxQueueSpacesAvailable MPU_uxQueueSpacesAvailable + #define xQueueGetMutexHolder MPU_xQueueGetMutexHolder + #define xQueueTakeMutexRecursive MPU_xQueueTakeMutexRecursive + #define xQueueGiveMutexRecursive MPU_xQueueGiveMutexRecursive + #define xQueueAddToSet MPU_xQueueAddToSet + #define xQueueSelectFromSet MPU_xQueueSelectFromSet + + #if ( configQUEUE_REGISTRY_SIZE > 0 ) + #define vQueueAddToRegistry MPU_vQueueAddToRegistry + #define vQueueUnregisterQueue MPU_vQueueUnregisterQueue + #define pcQueueGetName MPU_pcQueueGetName + #endif /* #if ( configQUEUE_REGISTRY_SIZE > 0 ) */ + +/* Privileged only wrappers for Queue APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. 
*/ #define vQueueDelete MPU_vQueueDelete #define xQueueCreateMutex MPU_xQueueCreateMutex #define xQueueCreateMutexStatic MPU_xQueueCreateMutexStatic #define xQueueCreateCountingSemaphore MPU_xQueueCreateCountingSemaphore #define xQueueCreateCountingSemaphoreStatic MPU_xQueueCreateCountingSemaphoreStatic - #define xQueueGetMutexHolder MPU_xQueueGetMutexHolder - #define xQueueTakeMutexRecursive MPU_xQueueTakeMutexRecursive - #define xQueueGiveMutexRecursive MPU_xQueueGiveMutexRecursive #define xQueueGenericCreate MPU_xQueueGenericCreate #define xQueueGenericCreateStatic MPU_xQueueGenericCreateStatic + #define xQueueGenericReset MPU_xQueueGenericReset #define xQueueCreateSet MPU_xQueueCreateSet - #define xQueueAddToSet MPU_xQueueAddToSet #define xQueueRemoveFromSet MPU_xQueueRemoveFromSet - #define xQueueSelectFromSet MPU_xQueueSelectFromSet - #define xQueueGenericReset MPU_xQueueGenericReset - #if ( configQUEUE_REGISTRY_SIZE > 0 ) - #define vQueueAddToRegistry MPU_vQueueAddToRegistry - #define vQueueUnregisterQueue MPU_vQueueUnregisterQueue - #define pcQueueGetName MPU_pcQueueGetName - #endif + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + #define xQueueGenericGetStaticBuffers MPU_xQueueGenericGetStaticBuffers + #define xQueueGenericSendFromISR MPU_xQueueGenericSendFromISR + #define xQueueGiveFromISR MPU_xQueueGiveFromISR + #define xQueuePeekFromISR MPU_xQueuePeekFromISR + #define xQueueReceiveFromISR MPU_xQueueReceiveFromISR + #define xQueueIsQueueEmptyFromISR MPU_xQueueIsQueueEmptyFromISR + #define xQueueIsQueueFullFromISR MPU_xQueueIsQueueFullFromISR + #define uxQueueMessagesWaitingFromISR MPU_uxQueueMessagesWaitingFromISR + #define xQueueGetMutexHolderFromISR MPU_xQueueGetMutexHolderFromISR + #define xQueueSelectFromSetFromISR MPU_xQueueSelectFromSetFromISR + #endif /* if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /* Map standard timer.h API functions to the MPU equivalents. */ - #define pvTimerGetTimerID MPU_pvTimerGetTimerID - #define vTimerSetTimerID MPU_vTimerSetTimerID - #define xTimerIsTimerActive MPU_xTimerIsTimerActive - #define xTimerGetTimerDaemonTaskHandle MPU_xTimerGetTimerDaemonTaskHandle - #define pcTimerGetName MPU_pcTimerGetName - #define vTimerSetReloadMode MPU_vTimerSetReloadMode - #define uxTimerGetReloadMode MPU_uxTimerGetReloadMode - #define xTimerGetPeriod MPU_xTimerGetPeriod - #define xTimerGetExpiryTime MPU_xTimerGetExpiryTime - #define xTimerGenericCommand MPU_xTimerGenericCommand + #define pvTimerGetTimerID MPU_pvTimerGetTimerID + #define vTimerSetTimerID MPU_vTimerSetTimerID + #define xTimerIsTimerActive MPU_xTimerIsTimerActive + #define xTimerGetTimerDaemonTaskHandle MPU_xTimerGetTimerDaemonTaskHandle + #define xTimerGenericCommand MPU_xTimerGenericCommand + #define pcTimerGetName MPU_pcTimerGetName + #define vTimerSetReloadMode MPU_vTimerSetReloadMode + #define uxTimerGetReloadMode MPU_uxTimerGetReloadMode + #define xTimerGetPeriod MPU_xTimerGetPeriod + #define xTimerGetExpiryTime MPU_xTimerGetExpiryTime + +/* Privileged only wrappers for Timer APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + #define xTimerGetReloadMode MPU_xTimerGetReloadMode + #define xTimerCreate MPU_xTimerCreate + #define xTimerCreateStatic MPU_xTimerCreateStatic + #define xTimerGetStaticBuffer MPU_xTimerGetStaticBuffer + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /* Map standard event_group.h API functions to the MPU equivalents. 
*/ - #define xEventGroupCreate MPU_xEventGroupCreate - #define xEventGroupCreateStatic MPU_xEventGroupCreateStatic - #define xEventGroupWaitBits MPU_xEventGroupWaitBits - #define xEventGroupClearBits MPU_xEventGroupClearBits - #define xEventGroupSetBits MPU_xEventGroupSetBits - #define xEventGroupSync MPU_xEventGroupSync - #define vEventGroupDelete MPU_vEventGroupDelete + #define xEventGroupWaitBits MPU_xEventGroupWaitBits + #define xEventGroupClearBits MPU_xEventGroupClearBits + #define xEventGroupSetBits MPU_xEventGroupSetBits + #define xEventGroupSync MPU_xEventGroupSync + + #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + #define uxEventGroupGetNumber MPU_uxEventGroupGetNumber + #define vEventGroupSetNumber MPU_vEventGroupSetNumber + #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */ + +/* Privileged only wrappers for Event Group APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ + #define xEventGroupCreate MPU_xEventGroupCreate + #define xEventGroupCreateStatic MPU_xEventGroupCreateStatic + #define vEventGroupDelete MPU_vEventGroupDelete + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + #define xEventGroupGetStaticBuffer MPU_xEventGroupGetStaticBuffer + #define xEventGroupClearBitsFromISR MPU_xEventGroupClearBitsFromISR + #define xEventGroupSetBitsFromISR MPU_xEventGroupSetBitsFromISR + #define xEventGroupGetBitsFromISR MPU_xEventGroupGetBitsFromISR + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /* Map standard message/stream_buffer.h API functions to the MPU * equivalents. */ #define xStreamBufferSend MPU_xStreamBufferSend #define xStreamBufferReceive MPU_xStreamBufferReceive - #define xStreamBufferNextMessageLengthBytes MPU_xStreamBufferNextMessageLengthBytes - #define vStreamBufferDelete MPU_vStreamBufferDelete #define xStreamBufferIsFull MPU_xStreamBufferIsFull #define xStreamBufferIsEmpty MPU_xStreamBufferIsEmpty - #define xStreamBufferReset MPU_xStreamBufferReset #define xStreamBufferSpacesAvailable MPU_xStreamBufferSpacesAvailable #define xStreamBufferBytesAvailable MPU_xStreamBufferBytesAvailable #define xStreamBufferSetTriggerLevel MPU_xStreamBufferSetTriggerLevel - #define xStreamBufferGenericCreate MPU_xStreamBufferGenericCreate - #define xStreamBufferGenericCreateStatic MPU_xStreamBufferGenericCreateStatic + #define xStreamBufferNextMessageLengthBytes MPU_xStreamBufferNextMessageLengthBytes +/* Privileged only wrappers for Stream Buffer APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. 
*/ + + #define xStreamBufferGenericCreate MPU_xStreamBufferGenericCreate + #define xStreamBufferGenericCreateStatic MPU_xStreamBufferGenericCreateStatic + #define vStreamBufferDelete MPU_vStreamBufferDelete + #define xStreamBufferReset MPU_xStreamBufferReset + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + #define xStreamBufferGetStaticBuffers MPU_xStreamBufferGetStaticBuffers + #define xStreamBufferSendFromISR MPU_xStreamBufferSendFromISR + #define xStreamBufferReceiveFromISR MPU_xStreamBufferReceiveFromISR + #define xStreamBufferSendCompletedFromISR MPU_xStreamBufferSendCompletedFromISR + #define xStreamBufferReceiveCompletedFromISR MPU_xStreamBufferReceiveCompletedFromISR + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /* Remove the privileged function macro, but keep the PRIVILEGED_DATA * macro so applications can place data in privileged access sections diff --git a/include/portable.h b/include/portable.h index 52d5434fe93..5734eb72037 100644 --- a/include/portable.h +++ b/include/portable.h @@ -110,13 +110,15 @@ StackType_t * pxEndOfStack, TaskFunction_t pxCode, void * pvParameters, - BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION; + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) PRIVILEGED_FUNCTION; #else StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, TaskFunction_t pxCode, void * pvParameters, - BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION; - #endif + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) PRIVILEGED_FUNCTION; + #endif /* if ( portHAS_STACK_OVERFLOW_CHECKING == 1 ) */ #else /* if ( portUSING_MPU_WRAPPERS == 1 ) */ #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 ) StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, @@ -229,6 +231,22 @@ void vPortEndScheduler( void ) PRIVILEGED_FUNCTION; uint32_t ulStackDepth ) PRIVILEGED_FUNCTION; #endif +/** + * @brief Checks if the calling task is authorized to access the given buffer. + * + * @param pvBuffer The buffer which the calling task wants to access. + * @param ulBufferLength The length of the pvBuffer. + * @param ulAccessRequested The permissions that the calling task wants. + * + * @return pdTRUE if the calling task is authorized to access the buffer, + * pdFALSE otherwise. + */ +#if ( portUSING_MPU_WRAPPERS == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) PRIVILEGED_FUNCTION; +#endif + /* *INDENT-OFF* */ #ifdef __cplusplus } diff --git a/include/queue.h b/include/queue.h index e5092ac5217..1c1b9822afd 100644 --- a/include/queue.h +++ b/include/queue.h @@ -1752,7 +1752,7 @@ void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber ) PRIVILEGED_FUNCTION; UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; uint8_t ucQueueGetQueueType( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; - +UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; /* *INDENT-OFF* */ #ifdef __cplusplus diff --git a/include/task.h b/include/task.h index 702f74dce9b..af17ced1753 100644 --- a/include/task.h +++ b/include/task.h @@ -66,6 +66,11 @@ #define tskMPU_REGION_NORMAL_MEMORY ( 1UL << 3UL ) #define tskMPU_REGION_DEVICE_MEMORY ( 1UL << 4UL ) +/* MPU region permissions stored in MPU settings to + * authorize access requests. */ +#define tskMPU_READ_PERMISSION ( 1UL << 0UL ) +#define tskMPU_WRITE_PERMISSION ( 1UL << 1UL ) + /* The direct to task notification feature used to have only a single notification * per task. 
Now there is an array of notifications per task that is dimensioned by * configTASK_NOTIFICATION_ARRAY_ENTRIES. For backward compatibility, any use of the @@ -3192,6 +3197,14 @@ TaskHandle_t pvTaskIncrementMutexHeldCount( void ) PRIVILEGED_FUNCTION; */ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION; +#if ( portUSING_MPU_WRAPPERS == 1 ) + +/* + * For internal use only. Get MPU settings associated with a task. + */ + xMPU_SETTINGS * xTaskGetMPUSettings( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +#endif /* portUSING_MPU_WRAPPERS */ /* *INDENT-OFF* */ #ifdef __cplusplus diff --git a/portable/ARMv8M/copy_files.py b/portable/ARMv8M/copy_files.py index d064969809c..3609c67ef33 100644 --- a/portable/ARMv8M/copy_files.py +++ b/portable/ARMv8M/copy_files.py @@ -73,16 +73,22 @@ 'ARM_CM33' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33')], 'ARM_CM33_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ')], 'ARM_CM35P' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'mpu_wrappers_v2_asm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM35P', 'portmacro.h')], 'ARM_CM35P_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM35P', 'portmacro.h')], 'ARM_CM55' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'mpu_wrappers_v2_asm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM55', 'portmacro.h')], 'ARM_CM55_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM55', 'portmacro.h')], 'ARM_CM85' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33', 'mpu_wrappers_v2_asm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM85', 'portmacro.h')], 'ARM_CM85_NTZ' : [os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'portasm.c'), + os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.c'), os.path.join('non_secure', 'portable', 'GCC', 'ARM_CM85', 'portmacro.h')] }, 'IAR':{ @@ -91,16 +97,22 @@ 'ARM_CM33' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33')], 'ARM_CM33_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ')], 'ARM_CM35P' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'mpu_wrappers_v2_asm.S'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM35P', 'portmacro.h')], 'ARM_CM35P_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.S'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM35P', 'portmacro.h')], 'ARM_CM55' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'mpu_wrappers_v2_asm.S'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM55', 'portmacro.h')], 'ARM_CM55_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'portasm.s'), + 
os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.S'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM55', 'portmacro.h')], 'ARM_CM85' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33', 'mpu_wrappers_v2_asm.S'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM85', 'portmacro.h')], 'ARM_CM85_NTZ' : [os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'portasm.s'), + os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM33_NTZ', 'mpu_wrappers_v2_asm.S'), os.path.join('non_secure', 'portable', 'IAR', 'ARM_CM85', 'portmacro.h')] }, } diff --git a/portable/ARMv8M/non_secure/port.c b/portable/ARMv8M/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/ARMv8M/non_secure/port.c +++ b/portable/ARMv8M/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
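/* vPortSVCHandler_C above recovers the SVC number from the caller's stacked PC: the stacked
 * PC points at the instruction following the 2-byte Thumb 'svc #imm8', so the immediate is
 * the byte two addresses below it. A sketch of just that lookup; the helper name is
 * illustrative and assumes a Thumb SVC encoding. */
#include <stdint.h>

static uint8_t prvSvcNumberFromStackedPC( uint32_t ulStackedPC )
{
    /* The byte at ( stacked PC - 2 ) is the imm8 field of the SVC instruction. */
    const uint8_t * pucInstruction = ( const uint8_t * ) ulStackedPC;

    return pucInstruction[ -2 ];
}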
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
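/* vSystemCallEnter above sizes the frame copy from EXC_RETURN: when bit 4 (masked by
 * portEXC_RETURN_STACK_FRAME_TYPE_MASK) is clear the hardware stacked an extended 26-word
 * frame that includes FPU state, otherwise a basic 8-word frame. A sketch of that decision
 * in isolation; the EXAMPLE_* constant and helper name are illustrative. */
#include <stdint.h>

#define EXAMPLE_EXC_RETURN_FTYPE_MASK    ( 1UL << 4UL )

static uint32_t prvStackedFrameSizeInWords( uint32_t ulExcReturn )
{
    /* Bit 4 clear: extended (FPU) frame. Bit 4 set: basic frame. */
    return ( ( ulExcReturn & EXAMPLE_EXC_RETURN_FTYPE_MASK ) == 0UL ) ? 26UL : 8UL;
}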
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
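/* vSystemCallEnter_1 above serves system calls whose fifth argument is passed on the task
 * stack (AAPCS passes arguments beyond r0-r3 on the stack), so that argument sits right
 * after the hardware-stacked exception frame, or one word further up when the hardware
 * inserted alignment padding (stacked xPSR bit 9 set). A sketch of that index selection;
 * the helper name is illustrative. */
#include <stdint.h>
#include <stdbool.h>

static uint32_t prvFifthArgumentIndex( uint32_t ulFrameSizeInWords,
                                       bool xFrameHasPadding )
{
    /* With padding the argument was pushed one word beyond the frame. */
    return xFrameHasPadding ? ( ulFrameSizeInWords + 1UL ) : ulFrameSizeInWords;
}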
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
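/* The inline assembly above raises and drops privilege by toggling CONTROL.nPRIV (bit 0):
 * vSystemCallEnter clears it with BICS before the privileged implementation runs, and
 * vSystemCallExit sets it again with ORRS before returning to the task. The write only
 * takes effect from privileged (handler) code, which is why it happens inside the SVC
 * handler. A rough C equivalent, assuming the CMSIS __get_CONTROL()/__set_CONTROL()
 * intrinsics are available; the names below are illustrative. */
#define EXAMPLE_CONTROL_NPRIV_BIT    ( 1UL << 0UL )

static void prvRaisePrivilegeSketch( void )
{
    __set_CONTROL( __get_CONTROL() & ~EXAMPLE_CONTROL_NPRIV_BIT );    /* Clear nPRIV. */
}

static void prvDropPrivilegeSketch( void )
{
    __set_CONTROL( __get_CONTROL() | EXAMPLE_CONTROL_NPRIV_BIT );     /* Set nPRIV. */
}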
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
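/* pxPortInitialiseStack above rounds the system call stack pointer down, and its limit up,
 * to the port's byte alignment so that both PSP and PSPLIM end up double-word aligned. The
 * same rounding in isolation, assuming 8-byte alignment; the EXAMPLE_* names and helpers
 * are illustrative. */
#include <stdint.h>

#define EXAMPLE_BYTE_ALIGNMENT         8UL
#define EXAMPLE_BYTE_ALIGNMENT_MASK    ( EXAMPLE_BYTE_ALIGNMENT - 1UL )

static uint32_t prvAlignDown( uint32_t ulAddress )
{
    return ( uint32_t ) ( ulAddress & ~EXAMPLE_BYTE_ALIGNMENT_MASK );
}

static uint32_t prvAlignUp( uint32_t ulAddress )
{
    return ( uint32_t ) ( ( ulAddress + EXAMPLE_BYTE_ALIGNMENT_MASK ) & ~EXAMPLE_BYTE_ALIGNMENT_MASK );
}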
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/mpu_wrappers_v2_asm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..a1e5ce0828f --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/mpu_wrappers_v2_asm.c @@ -0,0 +1,2419 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
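/* xPortIsAuthorizedToAccessBuffer above gives privileged kernel code a way to check an
 * unprivileged caller's pointer against the calling task's MPU regions before touching it.
 * An illustrative, hypothetical use (xCopyFromCallerBuffer is not part of this patch);
 * assumes FreeRTOS.h, task.h, portmacro.h and string.h are included. */
BaseType_t xCopyFromCallerBuffer( void * pvDestination,
                                  const void * pvCallerBuffer,
                                  uint32_t ulLength )
{
    BaseType_t xResult = pdFAIL;

    if( xPortIsAuthorizedToAccessBuffer( pvCallerBuffer,
                                         ulLength,
                                         tskMPU_READ_PERMISSION ) == pdTRUE )
    {
        /* The whole buffer is readable by the calling task, so the kernel
         * can safely read it on the task's behalf. */
        ( void ) memcpy( pvDestination, pvCallerBuffer, ulLength );
        xResult = pdPASS;
    }

    return xResult;
}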
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, 
control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) 
__attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) 
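/* Each naked wrapper in this file follows the same shape; roughly, in pseudocode (the real
 * functions are assembly so that no C stack frame is created before the privilege test):
 *
 *   if CONTROL.nPRIV == 0                  caller is already privileged
 *       tail-call MPU_<api>Impl            no SVC round trip needed
 *   else                                   caller is unprivileged
 *       svc portSVC_SYSTEM_CALL_ENTER      vSystemCallEnter: switch to the system call
 *                                          stack and raise privilege (ENTER_1 is used
 *                                          for calls that take a fifth argument)
 *       bl  MPU_<api>Impl
 *       svc portSVC_SYSTEM_CALL_EXIT       vSystemCallExit: restore the task stack and
 *                                          drop privilege
 *       bx  lr
 */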
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr 
\n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( 
configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t 
MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne 
MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( 
configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( 
TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b 
MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, 
#1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : 
"memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne 
MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " 
.syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b 
MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0, r1} 
\n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
+/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t 
MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl 
MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c index 44f159af1fa..64a24f527ff 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23/portasm.c @@ -44,6 +44,109 @@ #error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0. #endif +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. 
&( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " subs r2, #20 \n" + " ldmia r2!, {r0, r3-r6} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + " subs r2, #20 \n" + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " mov lr, r6 \n" + " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " \n" + " restore_general_regs_first_task: \n" + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r2, #48 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r2, #16 \n" + " \n" + " restore_context_done_first_task: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xSecureContextConst2: .word xSecureContext \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -54,83 +157,24 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " bics r4, r5 \n"/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - " movs r5, #4 \n"/* r5 = 4. */ - " str r5, [r2] \n"/* Program RNR = 4. */ - " ldmia r3!, {r6,r7} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r5, #5 \n"/* r5 = 5. */ - " str r5, [r2] \n"/* Program RNR = 5. */ - " ldmia r3!, {r6,r7} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r5, #6 \n"/* r5 = 6. 
*/ - " str r5, [r2] \n"/* Program RNR = 6. */ - " ldmia r3!, {r6,r7} \n"/* Read third set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r5, #7 \n"/* r5 = 7. */ - " str r5, [r2] \n"/* Program RNR = 7. */ - " ldmia r3!, {r6,r7} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " orrs r4, r5 \n"/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - " ldr r5, xSecureContextConst2 \n" - " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " msr control, r3 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r4 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ - " ldr r4, xSecureContextConst2 \n" - " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " bx r3 \n"/* Finally, branch to EXC_RETURN. 
*/ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" "xSecureContextConst2: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -237,6 +281,167 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/ + " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */ + " \n" + " cbz r0, save_ns_context \n" /* No secure context to save. */ + " save_s_context: \n" + " push {r0-r2, lr} \n" + " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r3} \n" /* LR is now in r3. */ + " mov lr, r3 \n" /* Restore LR. */ + " \n" + " save_ns_context: \n" + " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */ + " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " save_general_regs: \n" + " mrs r3, psp \n" + " stmia r2!, {r4-r7} \n" /* Store r4-r7. */ + " mov r4, r8 \n" /* r4 = r8. */ + " mov r5, r9 \n" /* r5 = r9. */ + " mov r6, r10 \n" /* r6 = r10. */ + " mov r7, r11 \n" /* r7 = r11. */ + " stmia r2!, {r4-r7} \n" /* Store r8-r11. */ + " ldmia r3!, {r4-r7} \n" /* Copy half of the hardware saved context into r4-r7. */ + " stmia r2!, {r4-r7} \n" /* Store the hardware saved context. */ + " ldmia r3!, {r4-r7} \n" /* Copy rest half of the hardware saved context into r4-r7. */ + " stmia r2!, {r4-r7} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psp \n" /* r3 = PSP. */ + " mrs r4, psplim \n" /* r4 = PSPLIM. */ + " mrs r5, control \n" /* r5 = CONTROL. */ + " mov r6, lr \n" /* r6 = LR. */ + " stmia r2!, {r0, r3-r6} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " cpsid i \n" + " bl vTaskSwitchContext \n" + " cpsie i \n" + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. 
*/ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " subs r2, #20 \n" + " ldmia r2!, {r0, r3-r6} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + " subs r2, #20 \n" + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " mov lr, r6 \n" + " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " cbz r0, restore_ns_context \n" /* No secure context to restore. */ + " \n" + " restore_s_context: \n" + " push {r1-r3, lr} \n" + " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r1-r4} \n" /* LR is now in r4. */ + " mov lr, r4 \n" + " \n" + " restore_ns_context: \n" + " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */ + " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. 
*/ + " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " restore_general_regs: \n" + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r2, #48 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r2, #16 \n" + " \n" + " restore_context_done: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xSecureContextConst: .word xSecureContext \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -260,52 +465,26 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " b select_next_task \n" " \n" " save_ns_context: \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ - " stmia r2!, {r4-r7} \n"/* Store the low registers that are not saved automatically. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. 
*/ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " subs r2, r2, #48 \n"/* r2 = r2 - 48. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. */ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */ + " mov r4, r8 \n"/* r4 = r8. */ + " mov r5, r9 \n"/* r5 = r9. */ + " mov r6, r10 \n"/* r6 = r10. */ + " mov r7, r11 \n"/* r7 = r11. */ + " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ " \n" " select_next_task: \n" " cpsid i \n" @@ -316,85 +495,22 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " bics r4, r5 \n"/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - " str r4, [r3] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r3] \n"/* Program MAIR0. */ - " ldr r4, xRNRConst \n"/* r4 = 0xe000ed98 [Location of RNR]. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " movs r5, #4 \n"/* r5 = 4. */ - " str r5, [r4] \n"/* Program RNR = 4. */ - " ldmia r1!, {r6,r7} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r5, #5 \n"/* r5 = 5. */ - " str r5, [r4] \n"/* Program RNR = 5. */ - " ldmia r1!, {r6,r7} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r5, #6 \n"/* r5 = 6. */ - " str r5, [r4] \n"/* Program RNR = 6. */ - " ldmia r1!, {r6,r7} \n"/* Read third set of RBAR/RLAR from TCB. 
*/ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r5, #7 \n"/* r5 = 7. */ - " str r5, [r4] \n"/* Program RNR = 7. */ - " ldmia r1!, {r6,r7} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " orrs r4, r5 \n"/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - " str r4, [r3] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #else /* configENABLE_MPU */ - " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #endif /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. 
*/ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" " \n" " restore_ns_context: \n" " adds r2, r2, #16 \n"/* Move to the high registers. */ @@ -411,16 +527,62 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" "xSecureContextConst: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "movs r0, #4 \n" + "mov r1, lr \n" + "tst r0, r1 \n" + "beq stack_on_msp \n" + "stack_on_psp: \n" + " mrs r0, psp \n" + " b route_svc \n" + "stack_on_msp: \n" + " mrs r0, msp \n" + " b route_svc \n" + " \n" + "route_svc: \n" + " ldr r2, [r0, #24] \n" + " subs r2, #2 \n" + " ldrb r3, [r2, #0] \n" + " cmp r3, %0 \n" + " beq system_call_enter \n" + " cmp r3, %1 \n" + " beq system_call_enter_1 \n" + " cmp r3, %2 \n" + " beq system_call_exit \n" + " b vPortSVCHandler_C \n" + " \n" + "system_call_enter: \n" + " b vSystemCallEnter \n" + "system_call_enter_1: \n" + " b vSystemCallEnter_1 \n" + "system_call_exit: \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. 
*/ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "r3", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -443,6 +605,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/mpu_wrappers_v2_asm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..a1e5ce0828f --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/mpu_wrappers_v2_asm.c @@ -0,0 +1,2419 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, 
control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) 
__attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) 
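Every MPU_* stub in this file follows the same shape, so the pattern is worth spelling out once in plain C. The sketch below is a hand-written equivalent for one representative wrapper: MPU_xTaskDelayUntilImpl and the portSVC_* numbers are the names the assembly uses, while the C control flow is illustrative (the real stubs are naked and preserve r0/r1 around the CONTROL check instead of using named locals).

    /* What each naked MPU_xxx stub does, written as ordinary C. CONTROL bit 0
     * (nPRIV) is 0 when the caller is already privileged. */
    extern BaseType_t MPU_xTaskDelayUntilImpl( TickType_t * const pxPreviousWakeTime,
                                               const TickType_t xTimeIncrement ); /* Same signature as the public API, per the kernel's MPU prototypes. */

    BaseType_t MPU_xTaskDelayUntil_sketch( TickType_t * const pxPreviousWakeTime, /* Illustrative name. */
                                           const TickType_t xTimeIncrement )
    {
        uint32_t ulControl;
        BaseType_t xReturn;

        __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

        if( ( ulControl & 1UL ) == 0UL )
        {
            /* Privileged caller - call the implementation directly. */
            xReturn = MPU_xTaskDelayUntilImpl( pxPreviousWakeTime, xTimeIncrement );
        }
        else
        {
            /* Unprivileged caller - SVC in to gain privilege and switch to the
             * system-call stack, run the implementation, then SVC out again. */
            __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_ENTER ) : "memory" );
            xReturn = MPU_xTaskDelayUntilImpl( pxPreviousWakeTime, xTimeIncrement );
            __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
        }

        return xReturn;
    }

The only variation across the stubs below is which Impl function is reached and which entry SVC number is used (portSVC_SYSTEM_CALL_ENTER_1 for the five-parameter notification calls).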
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr 
\n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( 
configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t 
MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne 
MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( 
configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( 
TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b 
MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, 
#1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : 
"memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne 
MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " 
.syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b 
MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0, r1} 
\n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
+/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t 
MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl 
MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c index 7fb7b5ade5d..b11b6e97c5e 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM23_NTZ/portasm.c @@ -44,6 +44,106 @@ #error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0. #endif +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. 
*/ + " \n" + " restore_context_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " subs r1, #16 \n" + " ldmia r1!, {r2-r5} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */ + " subs r1, #16 \n" + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " mov lr, r5 \n" + " \n" + " restore_general_regs_first_task: \n" + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r1, #48 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r1, #16 \n" + " \n" + " restore_context_done_first_task: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -54,78 +154,21 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " bics r3, r4 \n"/* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " ldmia r1!, {r5,r6} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r4, #5 \n"/* r4 = 5. */ - " str r4, [r2] \n"/* Program RNR = 5. */ - " ldmia r1!, {r5,r6} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r4, #6 \n"/* r4 = 6. */ - " str r4, [r2] \n"/* Program RNR = 6. */ - " ldmia r1!, {r5,r6} \n"/* Read third set of RBAR/RLAR from TCB. 
*/ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r4, #7 \n"/* r4 = 7. */ - " str r4, [r2] \n"/* Program RNR = 7. */ - " ldmia r1!, {r5,r6} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " orrs r3, r4 \n"/* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " msr control, r2 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r2 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " bx r2 \n"/* Finally, branch to EXC_RETURN. */ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -232,6 +275,136 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */ + " mrs r2, psp \n" /* r2 = PSP. */ + " \n" + " save_general_regs: \n" + " stmia r1!, {r4-r7} \n" /* Store r4-r7. */ + " mov r4, r8 \n" /* r4 = r8. */ + " mov r5, r9 \n" /* r5 = r9. */ + " mov r6, r10 \n" /* r6 = r10. */ + " mov r7, r11 \n" /* r7 = r11. */ + " stmia r1!, {r4-r7} \n" /* Store r8-r11. 
*/ + " ldmia r2!, {r4-r7} \n" /* Copy half of the hardware saved context into r4-r7. */ + " stmia r1!, {r4-r7} \n" /* Store the hardware saved context. */ + " ldmia r2!, {r4-r7} \n" /* Copy rest half of the hardware saved context into r4-r7. */ + " stmia r1!, {r4-r7} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r2, psp \n" /* r2 = PSP. */ + " mrs r3, psplim \n" /* r3 = PSPLIM. */ + " mrs r4, control \n" /* r4 = CONTROL. */ + " mov r5, lr \n" /* r5 = LR. */ + " stmia r1!, {r2-r5} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " cpsid i \n" + " bl vTaskSwitchContext \n" + " cpsie i \n" + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. 
*/ + " \n" + " restore_special_regs: \n" + " subs r1, #16 \n" + " ldmia r1!, {r2-r5} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */ + " subs r1, #16 \n" + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " mov lr, r5 \n" + " \n" + " restore_general_regs: \n" + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r1, #48 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r1, #16 \n" + " \n" + " restore_context_done: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -241,30 +414,16 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " mrs r0, psp \n"/* Read PSP in r0. */ " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - " subs r0, r0, #44 \n"/* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r0, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r2, control \n"/* r2 = CONTROL. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r0!, {r1-r7} \n"/* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. */ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - #else /* configENABLE_MPU */ - " subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */ - " str r0, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r2, psplim \n"/* r2 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. */ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - #endif /* configENABLE_MPU */ + " subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */ + " str r0, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */ + " mov r4, r8 \n"/* r4 = r8. */ + " mov r5, r9 \n"/* r5 = r9. 
*/ + " mov r6, r10 \n"/* r6 = r10. */ + " mov r7, r11 \n"/* r7 = r11. */ + " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ " \n" " cpsid i \n" " bl vTaskSwitchContext \n" @@ -274,88 +433,76 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " bics r3, r4 \n"/* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " ldmia r1!, {r5,r6} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r4, #5 \n"/* r4 = 5. */ - " str r4, [r2] \n"/* Program RNR = 5. */ - " ldmia r1!, {r5,r6} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r4, #6 \n"/* r4 = 6. */ - " str r4, [r2] \n"/* Program RNR = 6. */ - " ldmia r1!, {r5,r6} \n"/* Read third set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r4, #7 \n"/* r4 = 7. */ - " str r4, [r2] \n"/* Program RNR = 7. */ - " ldmia r1!, {r5,r6} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " orrs r3, r4 \n"/* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " adds r0, r0, #28 \n"/* Move to the high registers. */ - " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */ - " mov r8, r4 \n"/* r8 = r4. */ - " mov r9, r5 \n"/* r9 = r5. */ - " mov r10, r6 \n"/* r10 = r6. */ - " mov r11, r7 \n"/* r11 = r7. */ - " msr psp, r0 \n"/* Remember the new top of stack for the task. */ - " subs r0, r0, #44 \n"/* Move to the starting of the saved context. */ - " ldmia r0!, {r1-r7} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r2 \n"/* Restore the CONTROL register value for the task. 
*/ - " bx r3 \n" - #else /* configENABLE_MPU */ - " adds r0, r0, #24 \n"/* Move to the high registers. */ - " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */ - " mov r8, r4 \n"/* r8 = r4. */ - " mov r9, r5 \n"/* r9 = r5. */ - " mov r10, r6 \n"/* r10 = r6. */ - " mov r11, r7 \n"/* r11 = r7. */ - " msr psp, r0 \n"/* Remember the new top of stack for the task. */ - " subs r0, r0, #40 \n"/* Move to the starting of the saved context. */ - " ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */ - " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ - " bx r3 \n" - #endif /* configENABLE_MPU */ + " adds r0, r0, #24 \n"/* Move to the high registers. */ + " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */ + " mov r8, r4 \n"/* r8 = r4. */ + " mov r9, r5 \n"/* r9 = r5. */ + " mov r10, r6 \n"/* r10 = r6. */ + " mov r11, r7 \n"/* r11 = r7. */ + " msr psp, r0 \n"/* Remember the new top of stack for the task. */ + " subs r0, r0, #40 \n"/* Move to the starting of the saved context. */ + " ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ + " bx r3 \n" " \n" " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "movs r0, #4 \n" + "mov r1, lr \n" + "tst r0, r1 \n" + "beq stack_on_msp \n" + "stack_on_psp: \n" + " mrs r0, psp \n" + " b route_svc \n" + "stack_on_msp: \n" + " mrs r0, msp \n" + " b route_svc \n" + " \n" + "route_svc: \n" + " ldr r2, [r0, #24] \n" + " subs r2, #2 \n" + " ldrb r3, [r2, #0] \n" + " cmp r3, %0 \n" + " beq system_call_enter \n" + " cmp r3, %1 \n" + " beq system_call_enter_1 \n" + " cmp r3, %2 \n" + " beq system_call_exit \n" + " b vPortSVCHandler_C \n" + " \n" + "system_call_enter: \n" + " b vSystemCallEnter \n" + "system_call_enter_1: \n" + " b vSystemCallEnter_1 \n" + "system_call_exit: \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. 
*/ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "r3", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -378,4 +525,6 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/mpu_wrappers_v2_asm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
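Every wrapper in this file follows the same entry sequence: the naked function saves r0, reads the CONTROL register and tests its nPRIV bit. A privileged caller restores r0 and branches straight to the corresponding MPU_<Api>Impl routine; an unprivileged caller restores r0, raises svc portSVC_SYSTEM_CALL_ENTER (or portSVC_SYSTEM_CALL_ENTER_1 for the few calls such as xTaskGenericNotify and xEventGroupWaitBits that also pass an argument on the stack), calls the implementation while privileged, and raises svc portSVC_SYSTEM_CALL_EXIT before returning. The sketch below restates that dispatch in plain C purely for readability; it is not part of the patch, and the helper names (prvIsCallerPrivileged, prvSystemCallEnter, prvSystemCallExit, xSomeApiImpl) are hypothetical stand-ins for the CONTROL test and the two SVC instructions.

/* Editorial sketch, not part of the patch: the control flow that each
 * naked wrapper implements in assembly, written as plain C. The extern
 * helpers are hypothetical stand-ins: prvIsCallerPrivileged() for
 * "mrs r0, control / tst r0, #1", prvSystemCallEnter()/prvSystemCallExit()
 * for the ENTER/EXIT SVCs, and xSomeApiImpl() for the MPU_<Api>Impl target. */
typedef long BaseType_t;

extern BaseType_t prvIsCallerPrivileged( void );   /* Non-zero when CONTROL.nPRIV == 0. */
extern void prvSystemCallEnter( void );            /* Raise the system-call-enter SVC.  */
extern void prvSystemCallExit( void );             /* Raise the system-call-exit SVC.   */
extern BaseType_t xSomeApiImpl( void * pvParam );  /* Privileged implementation.        */

BaseType_t MPU_xSomeApiSketch( void * pvParam )
{
    BaseType_t xReturn;

    if( prvIsCallerPrivileged() != 0 )
    {
        /* Privileged caller: go straight to the implementation, mirroring
         * the "b MPU_<Api>Impl" branch in the assembly. */
        xReturn = xSomeApiImpl( pvParam );
    }
    else
    {
        /* Unprivileged caller: enter the system call, run the
         * implementation while privileged, then exit back to the
         * caller's original privilege level. */
        prvSystemCallEnter();
        xReturn = xSomeApiImpl( pvParam );
        prvSystemCallExit();
    }

    return xReturn;
}

MPU_xTimerGenericCommand further down adds one extra step, reading IPSR first so that a call made from interrupt context is treated as privileged and never attempts the SVC path.
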
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
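None of these entry points is meant to be called by name from application code: the whole block is compiled only when configENABLE_MPU == 1 and configUSE_MPU_WRAPPERS_V1 == 0 (see the guard near the top of the file and the matching #endif at its end), and the standard API names are expected to resolve to the MPU_* symbols through the usual mpu_wrappers.h mapping. The short usage sketch below is not part of the patch; the task, handle and bit names are hypothetical.

/* Editorial usage sketch, not part of the patch. With the v2 MPU wrappers
 * enabled, these calls are expected to resolve to MPU_xEventGroupWaitBits /
 * MPU_xEventGroupSetBits above, so an unprivileged task reaches the kernel
 * through the SVC entry path without any source change. */
#include "FreeRTOS.h"
#include "event_groups.h"

#define sketchBIT_DATA_READY    ( ( EventBits_t ) ( 1UL << 0 ) )
#define sketchBIT_DATA_ACK      ( ( EventBits_t ) ( 1UL << 1 ) )

static void prvUnprivilegedWorker( void * pvParameters )
{
    EventGroupHandle_t xEvents = ( EventGroupHandle_t ) pvParameters;
    EventBits_t xBits;

    for( ; ; )
    {
        /* One of the wrappers routed through portSVC_SYSTEM_CALL_ENTER_1,
         * apparently because its fifth argument travels on the stack. */
        xBits = xEventGroupWaitBits( xEvents,
                                     sketchBIT_DATA_READY,
                                     pdTRUE,          /* Clear on exit. */
                                     pdFALSE,         /* Any bit is enough. */
                                     portMAX_DELAY );

        if( ( xBits & sketchBIT_DATA_READY ) != ( EventBits_t ) 0 )
        {
            /* Acknowledge back to the producer. */
            ( void ) xEventGroupSetBits( xEvents, sketchBIT_DATA_ACK );
        }
    }
}
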
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c index 9f9b2e68d39..f7ec7d9c072 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33/portasm.c @@ -40,95 +40,120 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( - " .syntax unified \n" - " \n" - " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ - " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ - " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. 
&( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. 
*/ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xSecureContextConst2: .word xSecureContext \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - " ldr r5, xSecureContextConst2 \n" - " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " msr control, r3 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r4 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ - " ldr r4, xSecureContextConst2 \n" - " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, [r2] \n" /* Read pxCurrentTCB. */ + " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" + " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n" /* Set this task's PSPLIM value. */ + " movs r1, #2 \n" /* r1 = 2. */ + " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n" /* Discard everything up to r0. */ + " msr psp, r0 \n" /* This is now the new top of stack to use in the task. 
*/ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n" /* Finally, branch to EXC_RETURN. */ " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" "xSecureContextConst2: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -236,6 +261,160 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */ + " \n" + " cbz r0, save_ns_context \n" /* No secure context to save. */ + " save_s_context: \n" + " push {r0-r2, lr} \n" + " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r2, lr} \n" + " \n" + " save_ns_context: \n" + " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */ + " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " save_general_regs: \n" + " mrs r3, psp \n" + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r2!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psp \n" /* r3 = PSP. */ + " mrs r4, psplim \n" /* r4 = PSPLIM. */ + " mrs r5, control \n" /* r5 = CONTROL. */ + " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. 
*/ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " cbz r0, restore_ns_context \n" /* No secure context to restore. */ + " \n" + " restore_s_context: \n" + " push {r1-r3, lr} \n" + " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r1-r3, lr} \n" + " \n" + " restore_ns_context: \n" + " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */ + " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. 
*/ + " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " restore_general_regs: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xSecureContextConst: .word xSecureContext \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -260,20 +439,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " b select_next_task \n" " \n" " save_ns_context: \n" @@ -284,26 +454,14 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " it eq \n" " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. 
*/ - " subs r2, r2, #16 \n"/* r2 = r2 - 16. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " \n" " select_next_task: \n" " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ @@ -318,83 +476,22 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r3] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r3] \n"/* Program MAIR0. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r3] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r3] \n"/* Program RNR = 8. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r3] \n"/* Program RNR = 12. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. 
*/ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r3] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #else /* configENABLE_MPU */ - " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #endif /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. 
*/ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" " \n" " restore_ns_context: \n" " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ @@ -409,17 +506,60 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" "xSecureContextConst: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -437,6 +577,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/mpu_wrappers_v2_asm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( 
portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( 
portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : 
"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( 
TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + 
configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " 
svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b 
MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t 
MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} 
\n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" 
+ " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " 
\n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( 
portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + 
" svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + 
TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
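
Note on the wrappers above (illustrative, not part of the patch): every naked MPU_* wrapper in this file repeats one trampoline pattern. It saves r0, reads CONTROL, and tests the nPRIV bit; if the caller is already privileged it restores r0 and tail-branches ("b") to the corresponding Impl function, otherwise it brackets a "bl" to the Impl between an SVC with portSVC_SYSTEM_CALL_ENTER and an SVC with portSVC_SYSTEM_CALL_EXIT. The push/pop of r0 exists only so the first argument survives the CONTROL read. The rough C-level sketch below shows that control flow for one wrapper; it is a sketch under stated assumptions, not a drop-in replacement, because the real code must stay in naked assembly so the caller's r0-r3 argument registers reach the Impl untouched. prvIsPrivileged() and portRAISE_SVC() are hypothetical helpers introduced only for this sketch, and it assumes the port's portSVC_SYSTEM_CALL_ENTER / portSVC_SYSTEM_CALL_EXIT constants are in scope, as they are in the file above.

/* Rough C-level sketch of the naked-assembly trampoline pattern used by the
 * MPU_* wrappers above. prvIsPrivileged() and portRAISE_SVC() are hypothetical
 * helpers for illustration only; they are not part of the port. */
#include "FreeRTOS.h"
#include "stream_buffer.h"

/* The privileged implementation, provided elsewhere in the port (declared
 * ".extern" in the assembly version). */
extern size_t MPU_xStreamBufferSendImpl( StreamBufferHandle_t xStreamBuffer,
                                         const void * pvTxData,
                                         size_t xDataLengthBytes,
                                         TickType_t xTicksToWait );

/* Hypothetical macro wrapping the "svc" instruction with an immediate number. */
#define portRAISE_SVC( ucSvcNumber )    __asm volatile ( "svc %0" : : "i" ( ucSvcNumber ) : "memory" )

/* Hypothetical helper: pdTRUE when CONTROL.nPRIV == 0, i.e. thread mode is
 * privileged. The assembly wrappers do this with "mrs r0, control" / "tst r0, #1". */
static BaseType_t prvIsPrivileged( void )
{
    uint32_t ulControl;

    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

    return ( ( ulControl & 1UL ) == 0UL ) ? pdTRUE : pdFALSE;
}

size_t MPU_xStreamBufferSend_CSketch( StreamBufferHandle_t xStreamBuffer,
                                      const void * pvTxData,
                                      size_t xDataLengthBytes,
                                      TickType_t xTicksToWait )
{
    size_t xReturn;

    if( prvIsPrivileged() == pdTRUE )
    {
        /* Privileged path: the assembly version simply tail-branches ("b")
         * to the Impl function. */
        xReturn = MPU_xStreamBufferSendImpl( xStreamBuffer, pvTxData, xDataLengthBytes, xTicksToWait );
    }
    else
    {
        /* Unprivileged path: raise an SVC to enter the system call, run the
         * Impl, then raise the exit SVC before returning to the caller. */
        portRAISE_SVC( portSVC_SYSTEM_CALL_ENTER );
        xReturn = MPU_xStreamBufferSendImpl( xStreamBuffer, pvTxData, xDataLengthBytes, xTicksToWait );
        portRAISE_SVC( portSVC_SYSTEM_CALL_EXIT );
    }

    return xReturn;
}
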
+/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + 
: : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c index a78529d04d9..504b6bf3be3 100644 --- a/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c +++ b/portable/ARMv8M/non_secure/portable/GCC/ARM_CM33_NTZ/portasm.c @@ -40,6 +40,88 @@ * header files. 
*/ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. 
*/ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -50,80 +132,23 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " msr control, r2 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. 
*/ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r2 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r2 \n"/* Finally, branch to EXC_RETURN. */ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -231,6 +256,129 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */ + " mrs r2, psp \n" /* r2 = PSP. */ + " \n" + " save_general_regs: \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r1!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psplim \n" /* r3 = PSPLIM. */ + " mrs r4, control \n" /* r4 = CONTROL. */ + " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. 
*/ + " \n" + " program_mpu: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. 
*/ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -238,21 +386,16 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .syntax unified \n" " \n" " mrs r0, psp \n"/* Read PSP in r0. */ + " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r2, control \n"/* r2 = CONTROL. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ - #else /* configENABLE_MPU */ - " mrs r2, psplim \n"/* r2 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ - #endif /* configENABLE_MPU */ + " \n" + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ " \n" " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ @@ -270,52 +413,7 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. 
*/ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ - #else /* configENABLE_MPU */ - " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ - #endif /* configENABLE_MPU */ + " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -323,28 +421,66 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ " \n" - #if ( configENABLE_MPU == 1 ) - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ - #else /* configENABLE_MPU */ - " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ - #endif /* configENABLE_MPU */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ " msr psp, r0 \n"/* Remember the new top of stack for the task. */ " bx r3 \n" " \n" " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. 
*/ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -362,4 +498,6 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/mpu_wrappers_v2_asm.S b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..867642b5e97 --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/mpu_wrappers_v2_asm.S @@ -0,0 +1,1623 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0, r1} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0, r1} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0, r1} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0, r1} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0, r1} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0, r1} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0, r1} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
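
Every MPU_* entry point in this new file follows the same shape as MPU_vTaskSuspend above: preserve r0/r1, read CONTROL, test bit 0 (nPRIV), then either branch straight to the privileged MPU_...Impl routine or bracket it with svc #portSVC_SYSTEM_CALL_ENTER and svc #portSVC_SYSTEM_CALL_EXIT. The C sketch below models that decision only; the helper names are hypothetical stand-ins for single instructions, and the real wrappers must stay in assembly so the caller's argument registers reach the Impl function untouched.

#include <stdint.h>

#define CONTROL_nPRIV_MASK    0x1UL    /* Bit 0 of CONTROL: 1 => caller is unprivileged. */

/* Hypothetical stand-ins for single instructions used by the wrappers. */
extern uint32_t ulReadControl( void );          /* mrs rX, control */
extern void vRaiseSvcSystemCallEnter( void );   /* svc #portSVC_SYSTEM_CALL_ENTER */
extern void vRaiseSvcSystemCallExit( void );    /* svc #portSVC_SYSTEM_CALL_EXIT */
extern void MPU_vTaskSuspendImpl( void * xTaskToSuspend );

/* Logical flow of one wrapper (illustration only, not kernel source). */
void MPU_vTaskSuspend_CModel( void * xTaskToSuspend )
{
    if( ( ulReadControl() & CONTROL_nPRIV_MASK ) == 0UL )
    {
        /* Caller is already privileged - call the implementation directly. */
        MPU_vTaskSuspendImpl( xTaskToSuspend );
    }
    else
    {
        /* Caller is unprivileged - the ENTER SVC raises privilege and sets up
         * the system call, the implementation runs, and the EXIT SVC drops
         * back to the caller's original privilege level. */
        vRaiseSvcSystemCallEnter();
        MPU_vTaskSuspendImpl( xTaskToSuspend );
        vRaiseSvcSystemCallExit();
    }
}

The push/pop of r0 and r1 around the CONTROL test is what lets the assembly version reuse those registers for the check without disturbing the first two API arguments.
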
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0, r1} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0, r1} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0, r1} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0, r1} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0, r1} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0, r1} + b 
MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0, r1} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne 
MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0, r1} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0, r1} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0, r1} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0, r1} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0, r1} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0, r1} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0, r1} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0, r1} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + 
bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0, r1} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0, r1} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0, r1} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0, r1} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0, r1} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0, r1} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
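
These wrappers depend on the SVC dispatch added to the GCC SVC_Handler earlier in this patch: the handler picks MSP or PSP from EXC_RETURN, loads the stacked PC at offset 24, reads the byte two bytes before it to recover the SVC immediate, and branches to vSystemCallEnter, vSystemCallEnter_1, vSystemCallExit or the generic vPortSVCHandler_C. A rough C rendering of that dispatch follows; the prototypes are inferred from the register set-up in the assembly (r0 = caller stack frame, r1 = EXC_RETURN) and should be treated as assumptions.

#include <stdint.h>

/* Prototypes inferred from the assembly above - treat as assumptions. */
extern void vSystemCallEnter( uint32_t * pulCallerStack, uint32_t ulLR );
extern void vSystemCallEnter_1( uint32_t * pulCallerStack, uint32_t ulLR );
extern void vSystemCallExit( uint32_t * pulCallerStack, uint32_t ulLR );
extern void vPortSVCHandler_C( uint32_t * pulCallerStack );

#define portSVC_SYSTEM_CALL_ENTER      4    /* Values as defined at the top of this file. */
#define portSVC_SYSTEM_CALL_ENTER_1    5
#define portSVC_SYSTEM_CALL_EXIT       6

/* C rendering of the dispatch in SVC_Handler: the stacked PC sits at offset
 * 24 in the exception frame, and the SVC number is the low byte of the 16-bit
 * svc instruction located two bytes before that return address. */
static void prvDispatchSvc( uint32_t * pulCallerStack, uint32_t ulLR )
{
    uint32_t ulStackedPc = pulCallerStack[ 6 ];                        /* ldr r1, [r0, #24] */
    uint8_t ucSvcNumber = *( ( uint8_t * ) ( ulStackedPc - 2UL ) );    /* ldrb r2, [r1, #-2] */

    switch( ucSvcNumber )
    {
        case portSVC_SYSTEM_CALL_ENTER:
            vSystemCallEnter( pulCallerStack, ulLR );
            break;

        case portSVC_SYSTEM_CALL_ENTER_1:
            vSystemCallEnter_1( pulCallerStack, ulLR );
            break;

        case portSVC_SYSTEM_CALL_EXIT:
            vSystemCallExit( pulCallerStack, ulLR );
            break;

        default:
            vPortSVCHandler_C( pulCallerStack );
            break;
    }
}
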
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0, r1} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0, r1} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0, r1} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0, r1} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0, r1} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0, r1} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + movs r1, #1 + tst r0, r1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0, r1} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0, r1} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0, r1} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0, r1} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0, r1} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0, r1} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
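
MPU_xTimerGenericCommand above is the one wrapper that deviates from the common pattern: because xTimerGenericCommand can legitimately be called from an ISR, it first reads IPSR and takes the privileged path whenever the exception number is non-zero, and only then applies the usual CONTROL/nPRIV test. A small C sketch of that decision follows; the two register-read helpers are hypothetical stand-ins for the mrs instructions.

#include <stdint.h>

/* Hypothetical stand-ins for "mrs rX, ipsr" and "mrs rX, control". */
extern uint32_t ulReadIpsr( void );
extern uint32_t ulReadControl( void );

/* Mirrors the checks at the top of MPU_xTimerGenericCommand: returns non-zero
 * when the wrapper may branch directly to MPU_xTimerGenericCommandImpl
 * instead of going through the SVC system-call path. */
static uint32_t prvTimerCommandUsesPrivilegedPath( void )
{
    uint32_t ulUsePrivilegedPath = 0UL;

    if( ulReadIpsr() != 0UL )
    {
        /* Handler mode - called from an ISR, so always take the privileged path. */
        ulUsePrivilegedPath = 1UL;
    }
    else if( ( ulReadControl() & 0x1UL ) == 0UL )
    {
        /* Thread mode with CONTROL.nPRIV clear - caller is already privileged. */
        ulUsePrivilegedPath = 1UL;
    }

    return ulUsePrivilegedPath;
}

When neither check passes, the wrapper raises svc #portSVC_SYSTEM_CALL_ENTER_1 rather than the plain ENTER number, since xTimerGenericCommand takes five parameters.
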
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0, r1} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0, r1} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0, r1} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0, r1} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0, r1} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0, r1} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0, r1} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0, r1} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0, r1} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portasm.s b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portasm.s index fffed8df619..648ae005010 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portasm.s +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23/portasm.s @@ -33,12 +33,21 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN xSecureContext EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C EXTERN SecureContext_SaveContext EXTERN SecureContext_LoadContext +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -98,65 +107,99 @@ vPortAllocateSecureContext: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */ + movs r3, #5 /* r3 = 5. */ + str r3, [r1] /* Program RNR = 5. */ + ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */ + movs r3, #6 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 6. */ + ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */ + movs r3, #7 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 7. */ + ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. 
*/ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + subs r2, #20 + ldmia r2!, {r0, r3-r6} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + subs r2, #20 + msr psp, r3 + msr psplim, r4 + msr control, r5 + mov lr, r6 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + + restore_general_regs_first_task: + subs r2, #32 + ldmia r2!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy half of the the hardware saved context on the task stack. */ + ldmia r2!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy rest half of the the hardware saved context on the task stack. */ + subs r2, #48 + ldmia r2!, {r4-r7} /* Restore r8-r11. */ + mov r8, r4 /* r8 = r4. */ + mov r9, r5 /* r9 = r5. */ + mov r10, r6 /* r10 = r6. */ + mov r11, r7 /* r11 = r7. */ + subs r2, #32 + ldmia r2!, {r4-r7} /* Restore r4-r7. */ + subs r2, #16 + + restore_context_done_first_task: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r3, [r2] /* Read pxCurrentTCB. */ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - bics r4, r5 /* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - movs r5, #4 /* r5 = 4. */ - str r5, [r2] /* Program RNR = 4. */ - ldmia r3!, {r6,r7} /* Read first set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */ - stmia r4!, {r6,r7} /* Write first set of RBAR/RLAR registers. */ - movs r5, #5 /* r5 = 5. */ - str r5, [r2] /* Program RNR = 5. */ - ldmia r3!, {r6,r7} /* Read second set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */ - stmia r4!, {r6,r7} /* Write second set of RBAR/RLAR registers. */ - movs r5, #6 /* r5 = 6. */ - str r5, [r2] /* Program RNR = 6. */ - ldmia r3!, {r6,r7} /* Read third set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. 
*/ - stmia r4!, {r6,r7} /* Write third set of RBAR/RLAR registers. */ - movs r5, #7 /* r5 = 7. */ - str r5, [r2] /* Program RNR = 7. */ - ldmia r3!, {r6,r7} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */ - stmia r4!, {r6,r7} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - orrs r4, r5 /* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - ldr r5, =xSecureContext - str r1, [r5] /* Set xSecureContext to this task's value for the same. */ - msr psplim, r2 /* Set this task's PSPLIM value. */ - msr control, r3 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - bx r4 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ ldr r4, =xSecureContext str r1, [r4] /* Set xSecureContext to this task's value for the same. */ @@ -167,6 +210,7 @@ vRestoreContextOfFirstTask: msr psp, r0 /* This is now the new top of stack to use in the task. */ isb bx r3 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -199,6 +243,149 @@ vClearInterruptMask: msr PRIMASK, r0 bx lr /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/ + ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */ + + cbz r0, save_ns_context /* No secure context to save. */ + save_s_context: + push {r0-r2, lr} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r3} /* LR is now in r3. */ + mov lr, r3 /* Restore LR. */ + + save_ns_context: + mov r3, lr /* r3 = LR (EXC_RETURN). */ + lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + save_general_regs: + mrs r3, psp + stmia r2!, {r4-r7} /* Store r4-r7. */ + mov r4, r8 /* r4 = r8. */ + mov r5, r9 /* r5 = r9. */ + mov r6, r10 /* r6 = r10. */ + mov r7, r11 /* r7 = r11. */ + stmia r2!, {r4-r7} /* Store r8-r11. */ + ldmia r3!, {r4-r7} /* Copy half of the hardware saved context into r4-r7. */ + stmia r2!, {r4-r7} /* Store the hardware saved context. */ + ldmia r3!, {r4-r7} /* Copy rest half of the hardware saved context into r4-r7. */ + stmia r2!, {r4-r7} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psp /* r3 = PSP. */ + mrs r4, psplim /* r4 = PSPLIM. 
*/ + mrs r5, control /* r5 = CONTROL. */ + mov r6, lr /* r6 = LR. */ + stmia r2!, {r0, r3-r6} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + cpsid i + bl vTaskSwitchContext + cpsie i + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */ + movs r3, #5 /* r3 = 5. */ + str r3, [r1] /* Program RNR = 5. */ + ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */ + movs r3, #6 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 6. */ + ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */ + movs r3, #7 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 7. */ + ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs: + subs r2, #20 + ldmia r2!, {r0, r3-r6} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + subs r2, #20 + msr psp, r3 + msr psplim, r4 + msr control, r5 + mov lr, r6 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + cbz r0, restore_ns_context /* No secure context to restore. */ + + restore_s_context: + push {r1-r3, lr} + bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r1-r4} /* LR is now in r4. */ + mov lr, r4 + + restore_ns_context: + mov r0, lr /* r0 = LR (EXC_RETURN). */ + lsls r0, r0, #25 /* r0 = r0 << 25. 
Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + restore_general_regs: + subs r2, #32 + ldmia r2!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy half of the the hardware saved context on the task stack. */ + ldmia r2!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy rest half of the the hardware saved context on the task stack. */ + subs r2, #48 + ldmia r2!, {r4-r7} /* Restore r8-r11. */ + mov r8, r4 /* r8 = r4. */ + mov r9, r5 /* r9 = r5. */ + mov r10, r6 /* r10 = r6. */ + mov r11, r7 /* r11 = r7. */ + subs r2, #32 + ldmia r2!, {r4-r7} /* Restore r4-r7. */ + subs r2, #16 + + restore_context_done: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ PendSV_Handler: ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ @@ -216,41 +403,18 @@ PendSV_Handler: bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ -#else /* configENABLE_MPU */ + subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ -#endif /* configENABLE_MPU */ + b select_next_task save_ns_context: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - adds r2, r2, #16 /* r2 = r2 + 16. */ - stmia r2!, {r4-r7} /* Store the low registers that are not saved automatically. */ - mov r4, r8 /* r4 = r8. */ - mov r5, r9 /* r5 = r9. */ - mov r6, r10 /* r6 = r10. */ - mov r7, r11 /* r7 = r11. */ - stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - subs r2, r2, #48 /* r2 = r2 - 48. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ @@ -261,7 +425,6 @@ PendSV_Handler: mov r6, r10 /* r6 = r10. */ mov r7, r11 /* r7 = r11. */ stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. 
*/ - #endif /* configENABLE_MPU */ select_next_task: cpsid i @@ -272,68 +435,6 @@ PendSV_Handler: ldr r1, [r3] /* Read pxCurrentTCB. */ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ - #if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - bics r4, r5 /* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - str r4, [r3] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r3] /* Program MAIR0. */ - ldr r4, =0xe000ed98 /* r4 = 0xe000ed98 [Location of RNR]. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - movs r5, #4 /* r5 = 4. */ - str r5, [r4] /* Program RNR = 4. */ - ldmia r1!, {r6,r7} /* Read first set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write first set of RBAR/RLAR registers. */ - movs r5, #5 /* r5 = 5. */ - str r5, [r4] /* Program RNR = 5. */ - ldmia r1!, {r6,r7} /* Read second set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write second set of RBAR/RLAR registers. */ - movs r5, #6 /* r5 = 6. */ - str r5, [r4] /* Program RNR = 6. */ - ldmia r1!, {r6,r7} /* Read third set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write third set of RBAR/RLAR registers. */ - movs r5, #7 /* r5 = 7. */ - str r5, [r4] /* Program RNR = 7. */ - ldmia r1!, {r6,r7} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - orrs r4, r5 /* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - str r4, [r3] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - - #if ( configENABLE_MPU == 1 ) - ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r3 /* Restore the CONTROL register value for the task. */ - mov lr, r4 /* LR = r4. */ - ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ - str r0, [r3] /* Restore the task's xSecureContext. */ - cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ - ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - ldr r1, [r3] /* Read pxCurrentTCB. */ - push {r2, r4} - bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - pop {r2, r4} - mov lr, r4 /* LR = r4. */ - lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - msr psp, r2 /* Remember the new top of stack for the task. 
*/ - bx lr - #else /* configENABLE_MPU */ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ msr psplim, r1 /* Restore the PSPLIM register value for the task. */ mov lr, r4 /* LR = r4. */ @@ -350,7 +451,6 @@ PendSV_Handler: bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr - #endif /* configENABLE_MPU */ restore_ns_context: adds r2, r2, #16 /* Move to the high registers. */ @@ -363,8 +463,45 @@ PendSV_Handler: subs r2, r2, #32 /* Go back to the low registers. */ ldmia r2!, {r4-r7} /* Restore the low registers that are not automatically restored. */ bx lr + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + movs r0, #4 + mov r1, lr + tst r0, r1 + beq stack_on_msp + stack_on_psp: + mrs r0, psp + b route_svc + stack_on_msp: + mrs r0, msp + b route_svc + + route_svc: + ldr r2, [r0, #24] + subs r2, #2 + ldrb r3, [r2, #0] + cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq system_call_enter + cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq system_call_enter_1 + cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq system_call_exit + b vPortSVCHandler_C + + system_call_enter: + b vSystemCallEnter + system_call_enter_1: + b vSystemCallEnter_1 + system_call_exit: + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: movs r0, #4 mov r1, lr @@ -375,6 +512,8 @@ SVC_Handler: stacking_used_msp: mrs r0, msp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortFreeSecureContext: diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/mpu_wrappers_v2_asm.S b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..867642b5e97 --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/mpu_wrappers_v2_asm.S @@ -0,0 +1,1623 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0, r1} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0, r1} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0, r1} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0, r1} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0, r1} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0, r1} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0, r1} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0, r1} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0, r1} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0, r1} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0, r1} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0, r1} + b 
MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0, r1} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0, r1} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne 
MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0, r1} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0, r1} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0, r1} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0, 
r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0, r1} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0, r1} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0, r1} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0, r1} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC 
MPU_xQueuePeek +MPU_xQueuePeek: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0, r1} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0, r1} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0, r1} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0, r1} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0, r1} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0, r1} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc 
#portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0, r1} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0, r1} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0, r1} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0, r1} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0, r1} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0, r1} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0, r1} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + movs r1, #1 + tst r0, r1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0, r1} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0, r1} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0, r1} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0, r1} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0, r1} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0, r1} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
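The wrappers above all share one dispatch pattern: read CONTROL, test the nPRIV bit, branch straight to the corresponding ...Impl routine when the caller is already privileged, and otherwise bracket the call with the portSVC_SYSTEM_CALL_ENTER SVC (or portSVC_SYSTEM_CALL_ENTER_1 for five-parameter calls) and the portSVC_SYSTEM_CALL_EXIT SVC, so that the SVC handler can route into vSystemCallEnter/vSystemCallExit and switch the task between its unprivileged and privileged system-call context around the kernel call. The C fragment below is only a sketch of that control flow for one wrapper; prvReadControlRegister() and prvRaiseSvc() are hypothetical stand-ins for the mrs and svc instructions, and the MPU_xTaskDelayUntilImpl prototype is assumed here for illustration.

    #include <stdint.h>
    #include "FreeRTOS.h"
    #include "task.h"

    /* Values from the definitions earlier in this file; kept in sync with
     * portmacro.h. */
    #ifndef portSVC_SYSTEM_CALL_ENTER
        #define portSVC_SYSTEM_CALL_ENTER    4
    #endif
    #ifndef portSVC_SYSTEM_CALL_EXIT
        #define portSVC_SYSTEM_CALL_EXIT     6
    #endif

    /* Hypothetical helpers standing in for the mrs/svc instructions used by
     * the assembly shims. */
    extern uint32_t prvReadControlRegister( void );   /* mrs r0, control */
    extern void prvRaiseSvc( uint32_t ulSvcNumber );  /* svc #ulSvcNumber */

    /* Assumed prototype of the privileged implementation the shim branches to. */
    extern BaseType_t MPU_xTaskDelayUntilImpl( TickType_t * const pxPreviousWakeTime,
                                               const TickType_t xTimeIncrement );

    /* Sketch of the control flow implemented by the MPU_xTaskDelayUntil shim. */
    BaseType_t MPU_xTaskDelayUntil_Sketch( TickType_t * pxPreviousWakeTime,
                                           TickType_t xTimeIncrement )
    {
        BaseType_t xReturn;

        if( ( prvReadControlRegister() & 0x01UL ) == 0UL )
        {
            /* nPRIV bit clear - the caller is already privileged, so call the
             * implementation directly. */
            xReturn = MPU_xTaskDelayUntilImpl( pxPreviousWakeTime, xTimeIncrement );
        }
        else
        {
            /* Unprivileged caller - enter the system call, run the
             * implementation, then exit the system call. */
            prvRaiseSvc( portSVC_SYSTEM_CALL_ENTER );
            xReturn = MPU_xTaskDelayUntilImpl( pxPreviousWakeTime, xTimeIncrement );
            prvRaiseSvc( portSVC_SYSTEM_CALL_EXIT );
        }

        return xReturn;
    }

The MPU_xTimerGenericCommand wrapper above additionally checks IPSR before the CONTROL test, so that calls made from an ISR always take the privileged path.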
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0, r1} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0, r1} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0, r1} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0, r1} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0, r1} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0, r1} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0, r1} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0, r1} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0, r1} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portasm.s b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portasm.s index 62bd3872284..8f77c4dafb1 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portasm.s +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM23_NTZ/portasm.s @@ -32,9 +32,18 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -88,63 +97,97 @@ vResetPrivilege: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */ + movs r3, #5 /* r3 = 5. */ + str r3, [r1] /* Program RNR = 5. */ + ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */ + movs r3, #6 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 6. */ + ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */ + movs r3, #7 /* r3 = 7. */ + str r3, [r1] /* Program RNR = 7. */ + ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. 
*/ + stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + subs r1, #16 + ldmia r1!, {r2-r5} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */ + subs r1, #16 + msr psp, r2 + msr psplim, r3 + msr control, r4 + mov lr, r5 + + restore_general_regs_first_task: + subs r1, #32 + ldmia r1!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */ + stmia r2!, {r4-r7} /* Copy half of the hardware saved context on the task stack. */ + ldmia r1!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */ + stmia r2!, {r4-r7} /* Copy rest half of the hardware saved context on the task stack. */ + subs r1, #48 + ldmia r1!, {r4-r7} /* Restore r8-r11. */ + mov r8, r4 /* r8 = r4. */ + mov r9, r5 /* r9 = r5. */ + mov r10, r6 /* r10 = r6. */ + mov r11, r7 /* r11 = r7. */ + subs r1, #32 + ldmia r1!, {r4-r7} /* Restore r4-r7. */ + subs r1, #16 + + restore_context_done_first_task: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - movs r4, #1 /* r4 = 1. */ - bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - str r3, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */ - movs r4, #5 /* r4 = 5. */ - str r4, [r2] /* Program RNR = 5. */ - ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */ - movs r4, #6 /* r4 = 6. */ - str r4, [r2] /* Program RNR = 6. */ - ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */ - movs r4, #7 /* r4 = 7. */ - str r4, [r2] /* Program RNR = 7. */ - ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. 
*/ - stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - movs r4, #1 /* r4 = 1. */ - orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - str r3, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - msr psplim, r1 /* Set this task's PSPLIM value. */ - msr control, r2 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - bx r3 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ msr psplim, r1 /* Set this task's PSPLIM value. */ movs r1, #2 /* r1 = 2. */ @@ -153,6 +196,7 @@ vRestoreContextOfFirstTask: msr psp, r0 /* This is now the new top of stack to use in the task. */ isb bx r2 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -187,23 +231,127 @@ vClearInterruptMask: bx lr /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */ + mrs r2, psp /* r2 = PSP. */ + + save_general_regs: + stmia r1!, {r4-r7} /* Store r4-r7. */ + mov r4, r8 /* r4 = r8. */ + mov r5, r9 /* r5 = r9. */ + mov r6, r10 /* r6 = r10. */ + mov r7, r11 /* r7 = r11. */ + stmia r1!, {r4-r7} /* Store r8-r11. */ + ldmia r2!, {r4-r7} /* Copy half of the hardware saved context into r4-r7. */ + stmia r1!, {r4-r7} /* Store the hardware saved context. */ + ldmia r2!, {r4-r7} /* Copy rest half of the hardware saved context into r4-r7. */ + stmia r1!, {r4-r7} /* Store the hardware saved context. */ + + save_special_regs: + mrs r2, psp /* r2 = PSP. */ + mrs r3, psplim /* r3 = PSPLIM. */ + mrs r4, control /* r4 = CONTROL. */ + mov r5, lr /* r5 = LR. */ + stmia r1!, {r2-r5} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + cpsid i + bl vTaskSwitchContext + cpsie i + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. 
*/ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */ + movs r3, #5 /* r3 = 5. */ + str r3, [r1] /* Program RNR = 5. */ + ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */ + movs r3, #6 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 6. */ + ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */ + movs r3, #7 /* r3 = 7. */ + str r3, [r1] /* Program RNR = 7. */ + ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs: + subs r1, #16 + ldmia r1!, {r2-r5} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */ + subs r1, #16 + msr psp, r2 + msr psplim, r3 + msr control, r4 + mov lr, r5 + + restore_general_regs: + subs r1, #32 + ldmia r1!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */ + stmia r2!, {r4-r7} /* Copy half of the hardware saved context on the task stack. */ + ldmia r1!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */ + stmia r2!, {r4-r7} /* Copy rest half of the hardware saved context on the task stack. */ + subs r1, #48 + ldmia r1!, {r4-r7} /* Restore r8-r11. */ + mov r8, r4 /* r8 = r4. */ + mov r9, r5 /* r9 = r5. */ + mov r10, r6 /* r10 = r6. */ + mov r11, r7 /* r11 = r7. */ + subs r1, #32 + ldmia r1!, {r4-r7} /* Restore r4-r7. */ + subs r1, #16 + + restore_context_done: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r0, r0, #44 /* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r0, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r2, control /* r2 = CONTROL. */ - mov r3, lr /* r3 = LR/EXC_RETURN. */ - stmia r0!, {r1-r7} /* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */ - mov r4, r8 /* r4 = r8. */ - mov r5, r9 /* r5 = r9. */ - mov r6, r10 /* r6 = r10. */ - mov r7, r11 /* r7 = r11. */ - stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */ -#else /* configENABLE_MPU */ + subs r0, r0, #40 /* Make space for PSPLIM, LR and the remaining registers on the stack. */ str r0, [r1] /* Save the new top of stack in TCB. */ mrs r2, psplim /* r2 = PSPLIM. */ @@ -214,7 +362,6 @@ PendSV_Handler: mov r6, r10 /* r6 = r10. */ mov r7, r11 /* r7 = r11. 
*/ stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */ -#endif /* configENABLE_MPU */ cpsid i bl vTaskSwitchContext @@ -224,63 +371,6 @@ PendSV_Handler: ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - movs r4, #1 /* r4 = 1. */ - bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - str r3, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */ - movs r4, #5 /* r4 = 5. */ - str r4, [r2] /* Program RNR = 5. */ - ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */ - movs r4, #6 /* r4 = 6. */ - str r4, [r2] /* Program RNR = 6. */ - ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */ - movs r4, #7 /* r4 = 7. */ - str r4, [r2] /* Program RNR = 7. */ - ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - movs r4, #1 /* r4 = 1. */ - orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - str r3, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - adds r0, r0, #28 /* Move to the high registers. */ - ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */ - mov r8, r4 /* r8 = r4. */ - mov r9, r5 /* r9 = r5. */ - mov r10, r6 /* r10 = r6. */ - mov r11, r7 /* r11 = r7. */ - msr psp, r0 /* Remember the new top of stack for the task. */ - subs r0, r0, #44 /* Move to the starting of the saved context. */ - ldmia r0!, {r1-r7} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r2 /* Restore the CONTROL register value for the task. */ - bx r3 -#else /* configENABLE_MPU */ adds r0, r0, #24 /* Move to the high registers. */ ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */ mov r8, r4 /* r8 = r4. */ @@ -292,9 +382,45 @@ PendSV_Handler: ldmia r0!, {r2-r7} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */ msr psplim, r2 /* Restore the PSPLIM register value for the task. 
*/ bx r3 + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + movs r0, #4 + mov r1, lr + tst r0, r1 + beq stack_on_msp + stack_on_psp: + mrs r0, psp + b route_svc + stack_on_msp: + mrs r0, msp + b route_svc + + route_svc: + ldr r2, [r0, #24] + subs r2, #2 + ldrb r3, [r2, #0] + cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq system_call_enter + cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq system_call_enter_1 + cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq system_call_exit + b vPortSVCHandler_C + + system_call_enter: + b vSystemCallEnter + system_call_enter_1: + b vSystemCallEnter_1 + system_call_exit: + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: movs r0, #4 mov r1, lr @@ -305,6 +431,8 @@ SVC_Handler: stacking_used_msp: mrs r0, msp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ END diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/mpu_wrappers_v2_asm.S b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b 
MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + 
MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + 
MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b 
MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl 
+ svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b 
MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers 
because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl 
+MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s index a193cd7b80e..15e74ffc16b 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33/portasm.s @@ -32,12 +32,21 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN xSecureContext EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C EXTERN SecureContext_SaveContext EXTERN SecureContext_LoadContext +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -89,50 +98,81 @@ vPortAllocateSecureContext: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r1 | 1 i.e. Set the bit 0 in r2. 
*/ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + + restore_general_regs_first_task: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r3, [r2] /* Read pxCurrentTCB. */ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - ldr r5, =xSecureContext - str r1, [r5] /* Set xSecureContext to this task's value for the same. */ - msr psplim, r2 /* Set this task's PSPLIM value. */ - msr control, r3 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r4 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ ldr r4, =xSecureContext str r1, [r4] /* Set xSecureContext to this task's value for the same. 
*/ @@ -145,6 +185,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r3 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -183,6 +224,143 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */ + + cbz r0, save_ns_context /* No secure context to save. */ + save_s_context: + push {r0-r2, lr} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r2, lr} + + save_ns_context: + mov r3, lr /* r3 = LR (EXC_RETURN). */ + lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + save_general_regs: + mrs r3, psp + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r2!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */ + sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r2!, {r4-r11} /* Store r4-r11. */ + ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r2!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psp /* r3 = PSP. */ + mrs r4, psplim /* r4 = PSPLIM. */ + mrs r5, control /* r5 = CONTROL. */ + stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. 
*/ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + cbz r0, restore_ns_context /* No secure context to restore. */ + + restore_s_context: + push {r1-r3, lr} + bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r1-r3, lr} + + restore_ns_context: + mov r0, lr /* r0 = LR (EXC_RETURN). */ + lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + restore_general_regs: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ @@ -200,20 +378,11 @@ PendSV_Handler: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. 
*/ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ -#else /* configENABLE_MPU */ subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ -#endif /* configENABLE_MPU */ b select_next_task save_ns_context: @@ -224,17 +393,6 @@ PendSV_Handler: it eq vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - adds r2, r2, #16 /* r2 = r2 + 16. */ - stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - subs r2, r2, #16 /* r2 = r2 - 16. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ adds r2, r2, #12 /* r2 = r2 + 12. */ @@ -243,7 +401,6 @@ PendSV_Handler: mov r3, lr /* r3 = LR/EXC_RETURN. */ subs r2, r2, #12 /* r2 = r2 - 12. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ select_next_task: mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY @@ -258,51 +415,6 @@ PendSV_Handler: ldr r1, [r3] /* Read pxCurrentTCB. */ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ - #if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r3] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r3] /* Program MAIR0. */ - ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r3] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r3] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - - #if ( configENABLE_MPU == 1 ) - ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. 
*/ - msr control, r3 /* Restore the CONTROL register value for the task. */ - mov lr, r4 /* LR = r4. */ - ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ - str r0, [r3] /* Restore the task's xSecureContext. */ - cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ - ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - ldr r1, [r3] /* Read pxCurrentTCB. */ - push {r2, r4} - bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - pop {r2, r4} - mov lr, r4 /* LR = r4. */ - lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - msr psp, r2 /* Remember the new top of stack for the task. */ - bx lr - #else /* configENABLE_MPU */ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ msr psplim, r1 /* Restore the PSPLIM register value for the task. */ mov lr, r4 /* LR = r4. */ @@ -319,7 +431,6 @@ PendSV_Handler: bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr - #endif /* configENABLE_MPU */ restore_ns_context: ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ @@ -330,14 +441,50 @@ PendSV_Handler: #endif /* configENABLE_FPU || configENABLE_MVE */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq syscall_exit b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortFreeSecureContext: diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/mpu_wrappers_v2_asm.S b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + 
MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
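+/* Every MPU_* wrapper in this file follows the same pattern: the CONTROL
+ * register is read and its nPRIV bit (bit 0) is tested. A privileged caller
+ * branches straight to the corresponding MPU_*Impl function; an unprivileged
+ * caller raises svc #portSVC_SYSTEM_CALL_ENTER (routed by the port's SVC
+ * handler to vSystemCallEnter), calls the MPU_*Impl function, and then raises
+ * svc #portSVC_SYSTEM_CALL_EXIT before returning to the caller. */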
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC 
MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc 
#portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop 
{r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
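+/* Wrappers for kernel APIs that take five parameters - for example
+ * MPU_xTaskGenericNotify and MPU_xTaskGenericNotifyWait above, and
+ * MPU_xTimerGenericCommand and MPU_xEventGroupWaitBits below - use
+ * svc #portSVC_SYSTEM_CALL_ENTER_1 in place of svc #portSVC_SYSTEM_CALL_ENTER,
+ * matching the "System calls with 5 parameters" definition at the top of this
+ * file; the exit path is svc #portSVC_SYSTEM_CALL_EXIT in both cases. */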
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + 
MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + 
pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s index 581b84d4951..ec52025270b 100644 --- a/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s +++ b/portable/ARMv8M/non_secure/portable/IAR/ARM_CM33_NTZ/portasm.s @@ -32,9 +32,18 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -79,48 +88,79 @@ vResetPrivilege: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. 
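+ * Note that the MPU is re-enabled only after MAIR0 and all of the
+ * task's RBAR/RLAR pairs have been programmed, and that writing
+ * r4-r11 starting at the RBAR address uses the RBAR/RLAR alias
+ * registers, so four regions are programmed for each value written
+ * to RNR - hence the regions are read from the TCB in blocks of four.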
*/ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs_first_task: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - msr psplim, r1 /* Set this task's PSPLIM value. */ - msr control, r2 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r3 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ msr psplim, r1 /* Set this task's PSPLIM value. */ movs r1, #2 /* r1 = 2. */ @@ -131,6 +171,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r2 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -169,6 +210,114 @@ vClearInterruptMask: bx lr /* Return. 
*/ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */ + mrs r2, psp /* r2 = PSP. */ + + save_general_regs: + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r1!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */ + sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r1!, {r4-r11} /* Store r4-r11. */ + ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r1!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psplim /* r3 = PSPLIM. */ + mrs r4, control /* r4 = CONTROL. */ + stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. 
&( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) @@ -176,16 +325,10 @@ PendSV_Handler: it eq vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ -#if ( configENABLE_MPU == 1 ) - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r2, control /* r2 = CONTROL. */ - mov r3, lr /* r3 = LR/EXC_RETURN. */ - stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ -#else /* configENABLE_MPU */ + mrs r2, psplim /* r2 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */ -#endif /* configENABLE_MPU */ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ @@ -203,37 +346,7 @@ PendSV_Handler: ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. 
*/ -#else /* configENABLE_MPU */ ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ -#endif /* configENABLE_MPU */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -241,22 +354,53 @@ PendSV_Handler: vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r2 /* Restore the CONTROL register value for the task. */ -#else /* configENABLE_MPU */ msr psplim, r2 /* Restore the PSPLIM register value for the task. */ -#endif /* configENABLE_MPU */ msr psp, r0 /* Remember the new top of stack for the task. */ bx r3 + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq syscall_exit + b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ END diff --git a/portable/ARMv8M/non_secure/portmacrocommon.h b/portable/ARMv8M/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/ARMv8M/non_secure/portmacrocommon.h +++ b/portable/ARMv8M/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. 
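+ *
+ * Each task is given a kernel-owned stack, described by this structure,
+ * on which its system calls execute. The portSVC_SYSTEM_CALL_ENTER SVC
+ * records the task's own stack pointer, stack limit and link register
+ * in pulTaskStack, ulStackLimitRegisterAtSystemCallEntry and
+ * ulLinkRegisterAtSystemCallEntry, and switches PSP to the buffer
+ * described by pulSystemCallStack and pulSystemCallStackLimit; the
+ * portSVC_SYSTEM_CALL_EXIT SVC restores them when the wrapped API
+ * returns.
+ *
+ * For illustration only - the stack size is application specific and
+ * 128 below is just an example value, given in words - a
+ * FreeRTOSConfig.h using these wrappers might contain:
+ * @code{c}
+ * #define configENABLE_MPU              1
+ * #define configSYSTEM_CALL_STACK_SIZE  128
+ * @endcode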
+ */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
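+ * Each element is the RBAR/RLAR pair that the context switch code
+ * writes to the MPU when the task is switched in. The members that
+ * follow are new with this version of the port: ulContext holds the
+ * task's saved register context, which is now stored in the TCB
+ * rather than on the task stack, and ulTaskFlags records, among other
+ * things, whether the task was created as a privileged task.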
*/ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/Common/mpu_wrappers.c b/portable/Common/mpu_wrappers.c index 92841e1363e..c9951956fea 100644 --- a/portable/Common/mpu_wrappers.c +++ b/portable/Common/mpu_wrappers.c @@ -48,7 +48,7 @@ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*-----------------------------------------------------------*/ -#if ( portUSING_MPU_WRAPPERS == 1 ) +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) BaseType_t MPU_xTaskCreate( TaskFunction_t pvTaskCode, @@ -2537,5 +2537,5 @@ #endif /*-----------------------------------------------------------*/ -#endif /* portUSING_MPU_WRAPPERS == 1 */ +#endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) */ /*-----------------------------------------------------------*/ diff --git a/portable/Common/mpu_wrappers_v2.c b/portable/Common/mpu_wrappers_v2.c new file mode 100644 index 00000000000..1e28d8e4eb3 --- /dev/null +++ b/portable/Common/mpu_wrappers_v2.c @@ -0,0 +1,4121 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* + * Implementation of the wrapper functions used to raise the processor privilege + * before calling a standard FreeRTOS API function. + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" +#include "mpu_prototypes.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + #ifndef configPROTECTED_KERNEL_OBJECT_POOL_SIZE + #error configPROTECTED_KERNEL_OBJECT_POOL_SIZE must be defined to maximum number of kernel objects in the application. + #endif + +/** + * @brief Offset added to the index before returning to the user. + * + * If the actual handle is stored at index i, ( i + INDEX_OFFSET ) + * is returned to the user. + */ + #define INDEX_OFFSET 1 + +/** + * @brief Opaque type for a kernel object. + */ + struct OpaqueObject; + typedef struct OpaqueObject * OpaqueObjectHandle_t; + +/** + * @brief Defines kernel object in the kernel object pool. + */ + typedef struct KernelObject + { + OpaqueObjectHandle_t xInternalObjectHandle; + uint32_t ulKernelObjectType; + void * pvKernelObjectData; + } KernelObject_t; + +/** + * @brief Kernel object types. + */ + #define KERNEL_OBJECT_TYPE_INVALID ( 0UL ) + #define KERNEL_OBJECT_TYPE_QUEUE ( 1UL ) + #define KERNEL_OBJECT_TYPE_TASK ( 2UL ) + #define KERNEL_OBJECT_TYPE_STREAM_BUFFER ( 3UL ) + #define KERNEL_OBJECT_TYPE_EVENT_GROUP ( 4UL ) + #define KERNEL_OBJECT_TYPE_TIMER ( 5UL ) + +/** + * @brief Checks whether an external index is valid or not. + */ + #define IS_EXTERNAL_INDEX_VALID( lIndex ) \ + ( ( ( lIndex ) >= INDEX_OFFSET ) && \ + ( ( lIndex ) < ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE + INDEX_OFFSET ) ) ) + +/** + * @brief Checks whether an internal index is valid or not. + */ + #define IS_INTERNAL_INDEX_VALID( lIndex ) \ + ( ( ( lIndex ) >= 0 ) && \ + ( ( lIndex ) < ( configPROTECTED_KERNEL_OBJECT_POOL_SIZE ) ) ) + +/** + * @brief Converts an internal index into external. + */ + #define CONVERT_TO_EXTERNAL_INDEX( lIndex ) ( ( lIndex ) + INDEX_OFFSET ) + +/** + * @brief Converts an external index into internal. + */ + #define CONVERT_TO_INTERNAL_INDEX( lIndex ) ( ( lIndex ) - INDEX_OFFSET ) + +/** + * @brief Get the index of a free slot in the kernel object pool. + * + * If a free slot is found, this function marks the slot as + * "not free". + * + * @return Index of a free slot is returned, if a free slot is + * found. Otherwise -1 is returned. + */ + static int32_t MPU_GetFreeIndexInKernelObjectPool( void ) PRIVILEGED_FUNCTION; + +/** + * @brief Set the given index as free in the kernel object pool. + * + * @param lIndex The index to set as free. + */ + static void MPU_SetIndexFreeInKernelObjectPool( int32_t lIndex ) PRIVILEGED_FUNCTION; + +/** + * @brief Get the index at which a given kernel object is stored. + * + * @param xHandle The given kernel object handle. + * @param ulKernelObjectType The kernel object type. + * + * @return Index at which the kernel object is stored if it is a valid + * handle, -1 otherwise. 
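+ *
+ * The index returned here is the zero-based internal index into the
+ * kernel object pool. The value handed to the application is the
+ * external form, offset by INDEX_OFFSET, so an object stored at
+ * internal index 0 is seen by the application as the opaque handle
+ * value CONVERT_TO_EXTERNAL_INDEX( 0 ), i.e. 1.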
+ */ + static int32_t MPU_GetIndexForHandle( OpaqueObjectHandle_t xHandle, + uint32_t ulKernelObjectType ) PRIVILEGED_FUNCTION; + +/** + * @brief Store the given kernel object handle at the given index in + * the kernel object pool. + * + * @param lIndex Index to store the given handle at. + * @param xHandle Kernel object handle to store. + * @param pvKernelObjectData The data associated with the kernel object. + * Currently, only used for timer objects to store timer callback. + * @param ulKernelObjectType The kernel object type. + */ + static void MPU_StoreHandleAndDataAtIndex( int32_t lIndex, + OpaqueObjectHandle_t xHandle, + void * pvKernelObjectData, + uint32_t ulKernelObjectType ) PRIVILEGED_FUNCTION; + +/** + * @brief Get the kernel object handle at the given index from + * the kernel object pool. + * + * @param lIndex Index at which to get the kernel object handle. + * @param ulKernelObjectType The kernel object type. + * + * @return The kernel object handle at the index. + */ + static OpaqueObjectHandle_t MPU_GetHandleAtIndex( int32_t lIndex, + uint32_t ulKernelObjectType ) PRIVILEGED_FUNCTION; + + #if ( configUSE_TIMERS == 1 ) + +/** + * @brief The function registered as callback for all the timers. + * + * We intercept all the timer callbacks so that we can call application + * callbacks with opaque handle. + * + * @param xInternalHandle The internal timer handle. + */ + static void MPU_TimerCallback( TimerHandle_t xInternalHandle ) PRIVILEGED_FUNCTION; + + #endif /* #if ( configUSE_TIMERS == 1 ) */ + +/* + * Wrappers to keep all the casting in one place. + */ + #define MPU_StoreQueueHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE ) + #define MPU_GetQueueHandleAtIndex( lIndex ) ( QueueHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE ) + + #if ( configUSE_QUEUE_SETS == 1 ) + #define MPU_StoreQueueSetHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE ) + #define MPU_GetQueueSetHandleAtIndex( lIndex ) ( QueueSetHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE ) + #define MPU_StoreQueueSetMemberHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_QUEUE ) + #define MPU_GetQueueSetMemberHandleAtIndex( lIndex ) ( QueueSetMemberHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_QUEUE ) + #define MPU_GetIndexForQueueSetMemberHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_QUEUE ) + #endif + +/* + * Wrappers to keep all the casting in one place for Task APIs. + */ + #define MPU_StoreTaskHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_TASK ) + #define MPU_GetTaskHandleAtIndex( lIndex ) ( TaskHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_TASK ) + #define MPU_GetIndexForTaskHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_TASK ) + +/* + * Wrappers to keep all the casting in one place for Event Group APIs. 
+ */ + #define MPU_StoreEventGroupHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_EVENT_GROUP ) + #define MPU_GetEventGroupHandleAtIndex( lIndex ) ( EventGroupHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_EVENT_GROUP ) + #define MPU_GetIndexForEventGroupHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_EVENT_GROUP ) + +/* + * Wrappers to keep all the casting in one place for Stream Buffer APIs. + */ + #define MPU_StoreStreamBufferHandleAtIndex( lIndex, xHandle ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, NULL, KERNEL_OBJECT_TYPE_STREAM_BUFFER ) + #define MPU_GetStreamBufferHandleAtIndex( lIndex ) ( StreamBufferHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_STREAM_BUFFER ) + #define MPU_GetIndexForStreamBufferHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_STREAM_BUFFER ) + + #if ( configUSE_TIMERS == 1 ) + +/* + * Wrappers to keep all the casting in one place for Timer APIs. + */ + #define MPU_StoreTimerHandleAtIndex( lIndex, xHandle, pxApplicationCallback ) MPU_StoreHandleAndDataAtIndex( lIndex, ( OpaqueObjectHandle_t ) xHandle, ( void * ) pxApplicationCallback, KERNEL_OBJECT_TYPE_TIMER ) + #define MPU_GetTimerHandleAtIndex( lIndex ) ( TimerHandle_t ) MPU_GetHandleAtIndex( lIndex, KERNEL_OBJECT_TYPE_TIMER ) + #define MPU_GetIndexForTimerHandle( xHandle ) MPU_GetIndexForHandle( ( OpaqueObjectHandle_t ) xHandle, KERNEL_OBJECT_TYPE_TIMER ) + + #endif /* #if ( configUSE_TIMERS == 1 ) */ + +/*-----------------------------------------------------------*/ + +/** + * @brief Kernel object pool. + */ + PRIVILEGED_DATA static KernelObject_t xKernelObjectPool[ configPROTECTED_KERNEL_OBJECT_POOL_SIZE ] = { NULL }; +/*-----------------------------------------------------------*/ + + static int32_t MPU_GetFreeIndexInKernelObjectPool( void ) /* PRIVILEGED_FUNCTION */ + { + int32_t i, lFreeIndex = -1; + + /* This function is called only from resource create APIs + * which are not supposed to be called from ISRs. Therefore, + * we only need to suspend the scheduler and do not require + * critical section. */ + vTaskSuspendAll(); + { + for( i = 0; i < configPROTECTED_KERNEL_OBJECT_POOL_SIZE; i++ ) + { + if( xKernelObjectPool[ i ].xInternalObjectHandle == NULL ) + { + /* Mark this index as not free. 
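+ * A dummy non-NULL value ( ~0 ) reserves the slot so that another
+ * create call cannot claim the same index before the caller stores
+ * the real handle with MPU_StoreHandleAndDataAtIndex().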
*/ + xKernelObjectPool[ i ].xInternalObjectHandle = ( OpaqueObjectHandle_t ) ( ~0 ); + lFreeIndex = i; + break; + } + } + } + xTaskResumeAll(); + + return lFreeIndex; + } +/*-----------------------------------------------------------*/ + + static void MPU_SetIndexFreeInKernelObjectPool( int32_t lIndex ) /* PRIVILEGED_FUNCTION */ + { + configASSERT( IS_INTERNAL_INDEX_VALID( lIndex ) != pdFALSE ); + + taskENTER_CRITICAL(); + { + xKernelObjectPool[ lIndex ].xInternalObjectHandle = NULL; + xKernelObjectPool[ lIndex ].ulKernelObjectType = KERNEL_OBJECT_TYPE_INVALID; + xKernelObjectPool[ lIndex ].pvKernelObjectData = NULL; + } + taskEXIT_CRITICAL(); + } +/*-----------------------------------------------------------*/ + + static int32_t MPU_GetIndexForHandle( OpaqueObjectHandle_t xHandle, + uint32_t ulKernelObjectType ) /* PRIVILEGED_FUNCTION */ + { + int32_t i, lIndex = -1; + + configASSERT( xHandle != NULL ); + + for( i = 0; i < configPROTECTED_KERNEL_OBJECT_POOL_SIZE; i++ ) + { + if( ( xKernelObjectPool[ i ].xInternalObjectHandle == xHandle ) && + ( xKernelObjectPool[ i ].ulKernelObjectType == ulKernelObjectType ) ) + { + lIndex = i; + break; + } + } + + return lIndex; + } +/*-----------------------------------------------------------*/ + + static void MPU_StoreHandleAndDataAtIndex( int32_t lIndex, + OpaqueObjectHandle_t xHandle, + void * pvKernelObjectData, + uint32_t ulKernelObjectType ) /* PRIVILEGED_FUNCTION */ + { + configASSERT( IS_INTERNAL_INDEX_VALID( lIndex ) != pdFALSE ); + xKernelObjectPool[ lIndex ].xInternalObjectHandle = xHandle; + xKernelObjectPool[ lIndex ].ulKernelObjectType = ulKernelObjectType; + xKernelObjectPool[ lIndex ].pvKernelObjectData = pvKernelObjectData; + } +/*-----------------------------------------------------------*/ + + static OpaqueObjectHandle_t MPU_GetHandleAtIndex( int32_t lIndex, + uint32_t ulKernelObjectType ) /* PRIVILEGED_FUNCTION */ + { + configASSERT( IS_INTERNAL_INDEX_VALID( lIndex ) != pdFALSE ); + configASSERT( xKernelObjectPool[ lIndex ].ulKernelObjectType == ulKernelObjectType ); + return xKernelObjectPool[ lIndex ].xInternalObjectHandle; + } +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + static void MPU_TimerCallback( TimerHandle_t xInternalHandle ) /* PRIVILEGED_FUNCTION */ + { + int32_t i, lIndex = -1; + TimerHandle_t xExternalHandle = NULL; + TimerCallbackFunction_t pxApplicationCallBack = NULL; + + /* Coming from the timer task and therefore, should be valid. */ + configASSERT( xInternalHandle != NULL ); + + for( i = 0; i < configPROTECTED_KERNEL_OBJECT_POOL_SIZE; i++ ) + { + if( ( ( TimerHandle_t ) xKernelObjectPool[ i ].xInternalObjectHandle == xInternalHandle ) && + ( xKernelObjectPool[ i ].ulKernelObjectType == KERNEL_OBJECT_TYPE_TIMER ) ) + { + lIndex = i; + break; + } + } + + configASSERT( lIndex != -1 ); + xExternalHandle = ( TimerHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + + pxApplicationCallBack = ( TimerCallbackFunction_t ) xKernelObjectPool[ lIndex ].pvKernelObjectData; + pxApplicationCallBack( xExternalHandle ); + } + + #endif /* #if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +/*-----------------------------------------------------------*/ +/* MPU wrappers for tasks APIs. 
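+ *
+ * The wrappers below share a common pattern: any buffer supplied by
+ * the caller is checked with xPortIsAuthorizedToAccessBuffer() before
+ * it is used, any task handle parameter is the external index form
+ * and is translated back to the real handle with
+ * MPU_GetTaskHandleAtIndex(), and a NULL task handle keeps its usual
+ * meaning of "the calling task" and is passed through unchanged.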
*/ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_xTaskDelayUntil == 1 ) + + BaseType_t MPU_xTaskDelayUntilImpl( TickType_t * const pxPreviousWakeTime, + TickType_t xTimeIncrement ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTaskDelayUntilImpl( TickType_t * const pxPreviousWakeTime, + TickType_t xTimeIncrement ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + BaseType_t xIsPreviousWakeTimeAccessible = pdFALSE; + + xIsPreviousWakeTimeAccessible = xPortIsAuthorizedToAccessBuffer( pxPreviousWakeTime, + sizeof( TickType_t ), + ( tskMPU_WRITE_PERMISSION | tskMPU_READ_PERMISSION ) ); + + if( xIsPreviousWakeTimeAccessible == pdTRUE ) + { + xReturn = xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement ); + } + + return xReturn; + } + + #endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_xTaskAbortDelay == 1 ) + + BaseType_t MPU_xTaskAbortDelayImpl( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTaskAbortDelayImpl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + + if( xTask == NULL ) + { + xReturn = xTaskAbortDelay( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskAbortDelay( xInternalTaskHandle ); + } + } + } + + return xReturn; + } + + #endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_vTaskDelay == 1 ) + + void MPU_vTaskDelayImpl( TickType_t xTicksToDelay ) PRIVILEGED_FUNCTION; + + void MPU_vTaskDelayImpl( TickType_t xTicksToDelay ) /* PRIVILEGED_FUNCTION */ + { + vTaskDelay( xTicksToDelay ); + } + + #endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_uxTaskPriorityGet == 1 ) + + UBaseType_t MPU_uxTaskPriorityGetImpl( const TaskHandle_t pxTask ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxTaskPriorityGetImpl( const TaskHandle_t pxTask ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t uxReturn = configMAX_PRIORITIES; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( pxTask == NULL ) + { + uxReturn = uxTaskPriorityGet( pxTask ); + } + else + { + lIndex = ( int32_t ) pxTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + uxReturn = uxTaskPriorityGet( xInternalTaskHandle ); + } + } + } + + return uxReturn; + } + + #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_eTaskGetState == 1 ) + + eTaskState MPU_eTaskGetStateImpl( TaskHandle_t pxTask ) PRIVILEGED_FUNCTION; + + eTaskState MPU_eTaskGetStateImpl( TaskHandle_t pxTask ) /* PRIVILEGED_FUNCTION */ + { + eTaskState eReturn = eInvalid; + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + + if( pxTask == NULL ) + { + eReturn = eTaskGetState( pxTask ); + } + else + { + lIndex = ( int32_t ) pxTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + eReturn = 
eTaskGetState( xInternalTaskHandle ); + } + } + } + + return eReturn; + } + + #endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TRACE_FACILITY == 1 ) + + void MPU_vTaskGetInfoImpl( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) PRIVILEGED_FUNCTION; + + void MPU_vTaskGetInfoImpl( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + BaseType_t xIsTaskStatusWriteable = pdFALSE; + + xIsTaskStatusWriteable = xPortIsAuthorizedToAccessBuffer( pxTaskStatus, + sizeof( TaskStatus_t ), + tskMPU_WRITE_PERMISSION ); + + if( xIsTaskStatusWriteable == pdTRUE ) + { + if( xTask == NULL ) + { + vTaskGetInfo( xTask, pxTaskStatus, xGetFreeStackSpace, eState ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskGetInfo( xInternalTaskHandle, pxTaskStatus, xGetFreeStackSpace, eState ); + } + } + } + } + } + + #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + + TaskHandle_t MPU_xTaskGetIdleTaskHandleImpl( void ) PRIVILEGED_FUNCTION; + + TaskHandle_t MPU_xTaskGetIdleTaskHandleImpl( void ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xIdleTaskHandle = NULL; + + xIdleTaskHandle = xTaskGetIdleTaskHandle(); + + return xIdleTaskHandle; + } + + #endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_vTaskSuspend == 1 ) + + void MPU_vTaskSuspendImpl( TaskHandle_t pxTaskToSuspend ) PRIVILEGED_FUNCTION; + + void MPU_vTaskSuspendImpl( TaskHandle_t pxTaskToSuspend ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( pxTaskToSuspend == NULL ) + { + vTaskSuspend( pxTaskToSuspend ); + } + else + { + /* After the scheduler starts, only privileged tasks are allowed + * to suspend other tasks. 
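+ * An unprivileged task may still suspend itself - that is the
+ * pxTaskToSuspend == NULL case handled above - but it must not be
+ * able to stop other tasks in the system.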
*/ + if( ( xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED ) || ( portIS_TASK_PRIVILEGED() == pdTRUE ) ) + { + lIndex = ( int32_t ) pxTaskToSuspend; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskSuspend( xInternalTaskHandle ); + } + } + } + } + } + + #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_vTaskSuspend == 1 ) + + void MPU_vTaskResumeImpl( TaskHandle_t pxTaskToResume ) PRIVILEGED_FUNCTION; + + void MPU_vTaskResumeImpl( TaskHandle_t pxTaskToResume ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + lIndex = ( int32_t ) pxTaskToResume; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskResume( xInternalTaskHandle ); + } + } + } + + #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + + TickType_t MPU_xTaskGetTickCountImpl( void ) PRIVILEGED_FUNCTION; + + TickType_t MPU_xTaskGetTickCountImpl( void ) /* PRIVILEGED_FUNCTION */ + { + TickType_t xReturn; + + xReturn = xTaskGetTickCount(); + + return xReturn; + } +/*-----------------------------------------------------------*/ + + UBaseType_t MPU_uxTaskGetNumberOfTasksImpl( void ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxTaskGetNumberOfTasksImpl( void ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t uxReturn; + + uxReturn = uxTaskGetNumberOfTasks(); + + return uxReturn; + } +/*-----------------------------------------------------------*/ + + char * MPU_pcTaskGetNameImpl( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; + + char * MPU_pcTaskGetNameImpl( TaskHandle_t xTaskToQuery ) /* PRIVILEGED_FUNCTION */ + { + char * pcReturn = NULL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTaskToQuery == NULL ) + { + pcReturn = pcTaskGetName( xTaskToQuery ); + } + else + { + lIndex = ( int32_t ) xTaskToQuery; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + pcReturn = pcTaskGetName( xInternalTaskHandle ); + } + } + } + + return pcReturn; + } +/*-----------------------------------------------------------*/ + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounterImpl( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounterImpl( const TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + configRUN_TIME_COUNTER_TYPE xReturn = 0; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + xReturn = ulTaskGetRunTimeCounter( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = ulTaskGetRunTimeCounter( xInternalTaskHandle ); + } + } + } + + return xReturn; + } + + #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + + configRUN_TIME_COUNTER_TYPE 
MPU_ulTaskGetRunTimePercentImpl( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercentImpl( const TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + configRUN_TIME_COUNTER_TYPE xReturn = 0; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + xReturn = ulTaskGetRunTimePercent( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = ulTaskGetRunTimePercent( xInternalTaskHandle ); + } + } + } + + return xReturn; + } + + #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercentImpl( void ) PRIVILEGED_FUNCTION; + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercentImpl( void ) /* PRIVILEGED_FUNCTION */ + { + configRUN_TIME_COUNTER_TYPE xReturn; + + xReturn = ulTaskGetIdleRunTimePercent(); + + return xReturn; + } + + #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounterImpl( void ) PRIVILEGED_FUNCTION; + + configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounterImpl( void ) /* PRIVILEGED_FUNCTION */ + { + configRUN_TIME_COUNTER_TYPE xReturn; + + xReturn = ulTaskGetIdleRunTimeCounter(); + + return xReturn; + } + + #endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + void MPU_vTaskSetApplicationTaskTagImpl( TaskHandle_t xTask, + TaskHookFunction_t pxTagValue ) PRIVILEGED_FUNCTION; + + void MPU_vTaskSetApplicationTaskTagImpl( TaskHandle_t xTask, + TaskHookFunction_t pxTagValue ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + + if( xTask == NULL ) + { + vTaskSetApplicationTaskTag( xTask, pxTagValue ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskSetApplicationTaskTag( xInternalTaskHandle, pxTagValue ); + } + } + } + } + + #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + TaskHookFunction_t MPU_xTaskGetApplicationTaskTagImpl( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + + TaskHookFunction_t MPU_xTaskGetApplicationTaskTagImpl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + TaskHookFunction_t xReturn = NULL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + xReturn = xTaskGetApplicationTaskTag( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = 
xTaskGetApplicationTaskTag( xInternalTaskHandle ); + } + } + } + + return xReturn; + } + + #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void MPU_vTaskSetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) PRIVILEGED_FUNCTION; + + void MPU_vTaskSetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTaskToSet == NULL ) + { + vTaskSetThreadLocalStoragePointer( xTaskToSet, xIndex, pvValue ); + } + else + { + lIndex = ( int32_t ) xTaskToSet; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskSetThreadLocalStoragePointer( xInternalTaskHandle, xIndex, pvValue ); + } + } + } + } + + #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + + #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void * MPU_pvTaskGetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) PRIVILEGED_FUNCTION; + + void * MPU_pvTaskGetThreadLocalStoragePointerImpl( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* PRIVILEGED_FUNCTION */ + { + void * pvReturn = NULL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTaskToQuery == NULL ) + { + pvReturn = pvTaskGetThreadLocalStoragePointer( xTaskToQuery, xIndex ); + } + else + { + lIndex = ( int32_t ) xTaskToQuery; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + pvReturn = pvTaskGetThreadLocalStoragePointer( xInternalTaskHandle, xIndex ); + } + } + } + + return pvReturn; + } + + #endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t MPU_uxTaskGetSystemStateImpl( TaskStatus_t * pxTaskStatusArray, + UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * pulTotalRunTime ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxTaskGetSystemStateImpl( TaskStatus_t * pxTaskStatusArray, + UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * pulTotalRunTime ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t uxReturn = pdFALSE; + UBaseType_t xIsTaskStatusArrayWriteable = pdFALSE; + UBaseType_t xIsTotalRunTimeWriteable = pdFALSE; + + xIsTaskStatusArrayWriteable = xPortIsAuthorizedToAccessBuffer( pxTaskStatusArray, + sizeof( TaskStatus_t ) * uxArraySize, + tskMPU_WRITE_PERMISSION ); + + if( pulTotalRunTime != NULL ) + { + xIsTotalRunTimeWriteable = xPortIsAuthorizedToAccessBuffer( pulTotalRunTime, + sizeof( configRUN_TIME_COUNTER_TYPE ), + tskMPU_WRITE_PERMISSION ); + } + + if( ( xIsTaskStatusArrayWriteable == pdTRUE ) && + ( ( pulTotalRunTime == NULL ) || ( xIsTotalRunTimeWriteable == pdTRUE ) ) ) + { + uxReturn = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, pulTotalRunTime ); + } + + return uxReturn; + } + + #endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + + UBaseType_t MPU_uxTaskGetStackHighWaterMarkImpl( 
TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxTaskGetStackHighWaterMarkImpl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t uxReturn = 0; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + uxReturn = uxTaskGetStackHighWaterMark( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + uxReturn = uxTaskGetStackHighWaterMark( xInternalTaskHandle ); + } + } + } + + return uxReturn; + } + + #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + + configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2Impl( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + + configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2Impl( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + configSTACK_DEPTH_TYPE uxReturn = 0; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + uxReturn = uxTaskGetStackHighWaterMark2( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + uxReturn = uxTaskGetStackHighWaterMark2( xInternalTaskHandle ); + } + } + } + + return uxReturn; + } + + #endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + + TaskHandle_t MPU_xTaskGetCurrentTaskHandleImpl( void ) PRIVILEGED_FUNCTION; + + TaskHandle_t MPU_xTaskGetCurrentTaskHandleImpl( void ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xInternalTaskHandle = NULL; + TaskHandle_t xExternalTaskHandle = NULL; + int32_t lIndex; + + xInternalTaskHandle = xTaskGetCurrentTaskHandle(); + + if( xInternalTaskHandle != NULL ) + { + lIndex = MPU_GetIndexForTaskHandle( xInternalTaskHandle ); + + if( lIndex != -1 ) + { + xExternalTaskHandle = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + } + + return xExternalTaskHandle; + } + + #endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_xTaskGetSchedulerState == 1 ) + + BaseType_t MPU_xTaskGetSchedulerStateImpl( void ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTaskGetSchedulerStateImpl( void ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = taskSCHEDULER_NOT_STARTED; + + xReturn = xTaskGetSchedulerState(); + + return xReturn; + } + + #endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + + void MPU_vTaskSetTimeOutStateImpl( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION; + + void MPU_vTaskSetTimeOutStateImpl( TimeOut_t * const pxTimeOut ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xIsTimeOutWriteable = pdFALSE; + + xIsTimeOutWriteable = xPortIsAuthorizedToAccessBuffer( pxTimeOut, + sizeof( TimeOut_t ), + tskMPU_WRITE_PERMISSION ); + + if( xIsTimeOutWriteable == pdTRUE ) + { + vTaskSetTimeOutState( pxTimeOut ); + } + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xTaskCheckForTimeOutImpl( TimeOut_t * const 
pxTimeOut, + TickType_t * const pxTicksToWait ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTaskCheckForTimeOutImpl( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + BaseType_t xIsTimeOutWriteable = pdFALSE; + BaseType_t xIsTicksToWaitWriteable = pdFALSE; + + xIsTimeOutWriteable = xPortIsAuthorizedToAccessBuffer( pxTimeOut, + sizeof( TimeOut_t ), + tskMPU_WRITE_PERMISSION ); + xIsTicksToWaitWriteable = xPortIsAuthorizedToAccessBuffer( pxTicksToWait, + sizeof( TickType_t ), + tskMPU_WRITE_PERMISSION ); + + if( ( xIsTimeOutWriteable == pdTRUE ) && ( xIsTicksToWaitWriteable == pdTRUE ) ) + { + xReturn = xTaskCheckForTimeOut( pxTimeOut, pxTicksToWait ); + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t MPU_xTaskGenericNotifyImpl( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTaskGenericNotifyImpl( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + BaseType_t xIsPreviousNotificationValueWriteable = pdFALSE; + + if( pulPreviousNotificationValue != NULL ) + { + xIsPreviousNotificationValueWriteable = xPortIsAuthorizedToAccessBuffer( pulPreviousNotificationValue, + sizeof( uint32_t ), + tskMPU_WRITE_PERMISSION ); + } + + if( ( pulPreviousNotificationValue == NULL ) || ( xIsPreviousNotificationValueWriteable == pdTRUE ) ) + { + lIndex = ( int32_t ) xTaskToNotify; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskGenericNotify( xInternalTaskHandle, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue ); + } + } + } + + return xReturn; + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t MPU_xTaskGenericNotifyWaitImpl( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTaskGenericNotifyWaitImpl( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + BaseType_t xIsNotificationValueWritable = pdFALSE; + + if( pulNotificationValue != NULL ) + { + xIsNotificationValueWritable = xPortIsAuthorizedToAccessBuffer( pulNotificationValue, + sizeof( uint32_t ), + tskMPU_WRITE_PERMISSION ); + } + + if( ( pulNotificationValue == NULL ) || ( xIsNotificationValueWritable == pdTRUE ) ) + { + xReturn = xTaskGenericNotifyWait( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait ); + } + + return xReturn; + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + uint32_t MPU_ulTaskGenericNotifyTakeImpl( UBaseType_t 
uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + uint32_t MPU_ulTaskGenericNotifyTakeImpl( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulReturn; + + ulReturn = ulTaskGenericNotifyTake( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait ); + + return ulReturn; + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t MPU_xTaskGenericNotifyStateClearImpl( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTaskGenericNotifyStateClearImpl( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + xReturn = xTaskGenericNotifyStateClear( xTask, uxIndexToClear ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskGenericNotifyStateClear( xInternalTaskHandle, uxIndexToClear ); + } + } + } + + return xReturn; + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + uint32_t MPU_ulTaskGenericNotifyValueClearImpl( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) PRIVILEGED_FUNCTION; + + uint32_t MPU_ulTaskGenericNotifyValueClearImpl( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulReturn = 0; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + ulReturn = ulTaskGenericNotifyValueClear( xTask, uxIndexToClear, ulBitsToClear ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + ulReturn = ulTaskGenericNotifyValueClear( xInternalTaskHandle, uxIndexToClear, ulBitsToClear ); + } + } + } + + return ulReturn; + } + + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +/* Privileged only wrappers for Task APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + BaseType_t MPU_xTaskCreate( TaskFunction_t pvTaskCode, + const char * const pcName, + uint16_t usStackDepth, + void * pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * pxCreatedTask ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + /* xTaskCreate() can only be used to create privileged tasks in MPU port. 
*/ + if( ( uxPriority & portPRIVILEGE_BIT ) != 0 ) + { + xReturn = xTaskCreate( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, &( xInternalTaskHandle ) ); + + if( ( xReturn == pdPASS ) && ( xInternalTaskHandle != NULL ) ) + { + MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle ); + + if( pxCreatedTask != NULL ) + { + *pxCreatedTask = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + } + + return xReturn; + } + + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + TaskHandle_t MPU_xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xExternalTaskHandle = NULL; + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalTaskHandle = xTaskCreateStatic( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer ); + + if( xInternalTaskHandle != NULL ) + { + MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle ); + xExternalTaskHandle = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalTaskHandle; + } + + #endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_vTaskDelete == 1 ) + + void MPU_vTaskDelete( TaskHandle_t pxTaskToDelete ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + + if( pxTaskToDelete == NULL ) + { + xInternalTaskHandle = xTaskGetCurrentTaskHandle(); + lIndex = MPU_GetIndexForTaskHandle( xInternalTaskHandle ); + + vTaskDelete( xInternalTaskHandle ); + + if( lIndex != -1 ) + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + else + { + lIndex = ( int32_t ) pxTaskToDelete; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskDelete( xInternalTaskHandle ); + MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + } + } + } + } + + #endif /* #if ( INCLUDE_vTaskDelete == 1 ) */ +/*-----------------------------------------------------------*/ + + + #if ( INCLUDE_vTaskPrioritySet == 1 ) + + void MPU_vTaskPrioritySet( TaskHandle_t pxTask, + UBaseType_t uxNewPriority ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + + if( pxTask == NULL ) + { + vTaskPrioritySet( pxTask, uxNewPriority ); + } + else + { + lIndex = ( int32_t ) pxTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskPrioritySet( xInternalTaskHandle, uxNewPriority ); + } + } + } + } + + #endif /* if ( INCLUDE_vTaskPrioritySet == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_xTaskGetHandle == 1 ) + + TaskHandle_t MPU_xTaskGetHandle( const char * pcNameToQuery ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xInternalTaskHandle = NULL; + TaskHandle_t 
xExternalTaskHandle = NULL; + int32_t lIndex; + + xInternalTaskHandle = xTaskGetHandle( pcNameToQuery ); + + if( xInternalTaskHandle != NULL ) + { + lIndex = MPU_GetIndexForTaskHandle( xInternalTaskHandle ); + + if( lIndex != -1 ) + { + xExternalTaskHandle = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + } + + return xExternalTaskHandle; + } + + #endif /* if ( INCLUDE_xTaskGetHandle == 1 ) */ +/*-----------------------------------------------------------*/ + + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + BaseType_t MPU_xTaskCallApplicationTaskHook( TaskHandle_t xTask, + void * pvParameter ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + xReturn = xTaskCallApplicationTaskHook( xTask, pvParameter ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskCallApplicationTaskHook( xInternalTaskHandle, pvParameter ); + } + } + } + + return xReturn; + } + + #endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + BaseType_t MPU_xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, + TaskHandle_t * pxCreatedTask ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xReturn = xTaskCreateRestricted( pxTaskDefinition, &( xInternalTaskHandle ) ); + + if( ( xReturn == pdPASS ) && ( xInternalTaskHandle != NULL ) ) + { + MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle ); + + if( pxCreatedTask != NULL ) + { + *pxCreatedTask = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xReturn; + } + + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + BaseType_t MPU_xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, + TaskHandle_t * pxCreatedTask ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xReturn = xTaskCreateRestrictedStatic( pxTaskDefinition, &( xInternalTaskHandle ) ); + + if( ( xReturn == pdPASS ) && ( xInternalTaskHandle != NULL ) ) + { + MPU_StoreTaskHandleAtIndex( lIndex, xInternalTaskHandle ); + + if( pxCreatedTask != NULL ) + { + *pxCreatedTask = ( TaskHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xReturn; + } + + #endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + + void MPU_vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, + const MemoryRegion_t * const xRegions ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + + if( xTaskToModify == NULL ) + { + vTaskAllocateMPURegions( xTaskToModify, xRegions ); + } + else + { + lIndex = ( int32_t ) xTaskToModify; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + 
xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskAllocateMPURegions( xInternalTaskHandle, xRegions ); + } + } + } + } +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + BaseType_t MPU_xTaskGetStaticBuffers( TaskHandle_t xTask, + StackType_t ** ppuxStackBuffer, + StaticTask_t ** ppxTaskBuffer ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xInternalTaskHandle = NULL; + int32_t lIndex; + BaseType_t xReturn = pdFALSE; + + if( xTask == NULL ) + { + xInternalTaskHandle = xTaskGetCurrentTaskHandle(); + xReturn = xTaskGetStaticBuffers( xInternalTaskHandle, ppuxStackBuffer, ppxTaskBuffer ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskGetStaticBuffers( xInternalTaskHandle, ppuxStackBuffer, ppxTaskBuffer ); + } + } + } + + return xReturn; + } + + #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( INCLUDE_uxTaskPriorityGet == 1 ) + + UBaseType_t MPU_uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t uxReturn = configMAX_PRIORITIES; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + uxReturn = uxTaskPriorityGetFromISR( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + uxReturn = uxTaskPriorityGetFromISR( xInternalTaskHandle ); + } + } + } + + return uxReturn; + } + + #endif /* #if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) + + BaseType_t MPU_xTaskResumeFromISR( TaskHandle_t xTaskToResume ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + lIndex = ( int32_t ) xTaskToResume; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskResumeFromISR( xInternalTaskHandle ); + } + } + + return xReturn; + } + + #endif /* #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )*/ +/*---------------------------------------------------------------------------------------*/ + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + TaskHookFunction_t MPU_xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) /* PRIVILEGED_FUNCTION */ + { + TaskHookFunction_t xReturn = NULL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + if( xTask == NULL ) + { + xReturn = xTaskGetApplicationTaskTagFromISR( xTask ); + } + else + { + lIndex = ( int32_t ) xTask; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskGetApplicationTaskTagFromISR( xInternalTaskHandle ); + } + } + } + + return xReturn; + } + + #endif /* #if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ 
+/*---------------------------------------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t MPU_xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue, + BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + lIndex = ( int32_t ) xTaskToNotify; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + xReturn = xTaskGenericNotifyFromISR( xInternalTaskHandle, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } + + #endif /* #if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*---------------------------------------------------------------------------------------*/ + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + void MPU_vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + TaskHandle_t xInternalTaskHandle = NULL; + + lIndex = ( int32_t ) xTaskToNotify; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTaskHandle = MPU_GetTaskHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTaskHandle != NULL ) + { + vTaskGenericNotifyGiveFromISR( xInternalTaskHandle, uxIndexToNotify, pxHigherPriorityTaskWoken ); + } + } + } + #endif /*#if ( configUSE_TASK_NOTIFICATIONS == 1 )*/ +/*-----------------------------------------------------------*/ + +/*-----------------------------------------------------------*/ +/* MPU wrappers for queue APIs. 
*/ +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueGenericSendImpl( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xQueueGenericSendImpl( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + BaseType_t xCopyPosition ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + BaseType_t xReturn = pdFAIL; + BaseType_t xIsItemToQueueReadable = pdFALSE; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + if( pvItemToQueue != NULL ) + { + xIsItemToQueueReadable = xPortIsAuthorizedToAccessBuffer( pvItemToQueue, + uxQueueGetQueueItemSize( xInternalQueueHandle ), + tskMPU_READ_PERMISSION ); + } + + if( ( pvItemToQueue == NULL ) || ( xIsItemToQueueReadable == pdTRUE ) ) + { + xReturn = xQueueGenericSend( xInternalQueueHandle, pvItemToQueue, xTicksToWait, xCopyPosition ); + } + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + UBaseType_t MPU_uxQueueMessagesWaitingImpl( const QueueHandle_t pxQueue ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxQueueMessagesWaitingImpl( const QueueHandle_t pxQueue ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + UBaseType_t uxReturn = 0; + + lIndex = ( int32_t ) pxQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + uxReturn = uxQueueMessagesWaiting( xInternalQueueHandle ); + } + } + + return uxReturn; + } +/*-----------------------------------------------------------*/ + + UBaseType_t MPU_uxQueueSpacesAvailableImpl( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxQueueSpacesAvailableImpl( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + UBaseType_t uxReturn = 0; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + uxReturn = uxQueueSpacesAvailable( xInternalQueueHandle ); + } + } + + return uxReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueReceiveImpl( QueueHandle_t pxQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xQueueReceiveImpl( QueueHandle_t pxQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + BaseType_t xReturn = pdFAIL; + BaseType_t xIsReceiveBufferWritable = pdFALSE; + + lIndex = ( int32_t ) pxQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xIsReceiveBufferWritable = xPortIsAuthorizedToAccessBuffer( pvBuffer, + uxQueueGetQueueItemSize( xInternalQueueHandle ), + tskMPU_WRITE_PERMISSION ); + + if( xIsReceiveBufferWritable == pdTRUE ) + { + xReturn = xQueueReceive( xInternalQueueHandle, pvBuffer, 
xTicksToWait ); + } + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueuePeekImpl( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xQueuePeekImpl( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + BaseType_t xReturn = pdFAIL; + BaseType_t xIsReceiveBufferWritable = pdFALSE; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xIsReceiveBufferWritable = xPortIsAuthorizedToAccessBuffer( pvBuffer, + uxQueueGetQueueItemSize( xInternalQueueHandle ), + tskMPU_WRITE_PERMISSION ); + + if( xIsReceiveBufferWritable == pdTRUE ) + { + xReturn = xQueuePeek( xInternalQueueHandle, pvBuffer, xTicksToWait ); + } + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueSemaphoreTakeImpl( QueueHandle_t xQueue, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xQueueSemaphoreTakeImpl( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + BaseType_t xReturn = pdFAIL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueSemaphoreTake( xInternalQueueHandle, xTicksToWait ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + TaskHandle_t MPU_xQueueGetMutexHolderImpl( QueueHandle_t xSemaphore ) PRIVILEGED_FUNCTION; + + TaskHandle_t MPU_xQueueGetMutexHolderImpl( QueueHandle_t xSemaphore ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xMutexHolderTaskInternalHandle = NULL; + TaskHandle_t xMutexHolderTaskExternalHandle = NULL; + int32_t lIndex, lMutexHolderTaskIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + + lIndex = ( int32_t ) xSemaphore; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xMutexHolderTaskInternalHandle = xQueueGetMutexHolder( xInternalQueueHandle ); + + if( xMutexHolderTaskInternalHandle != NULL ) + { + lMutexHolderTaskIndex = MPU_GetIndexForTaskHandle( xMutexHolderTaskInternalHandle ); + + if( lMutexHolderTaskIndex != -1 ) + { + xMutexHolderTaskExternalHandle = ( TaskHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lMutexHolderTaskIndex ) ); + } + } + } + } + + return xMutexHolderTaskExternalHandle; + } + + #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t MPU_xQueueTakeMutexRecursiveImpl( QueueHandle_t xMutex, + TickType_t xBlockTime ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xQueueTakeMutexRecursiveImpl( QueueHandle_t xMutex, + TickType_t xBlockTime ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + 
lIndex = ( int32_t ) xMutex; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueTakeMutexRecursive( xInternalQueueHandle, xBlockTime ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_RECURSIVE_MUTEXES == 1 ) + + BaseType_t MPU_xQueueGiveMutexRecursiveImpl( QueueHandle_t xMutex ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xQueueGiveMutexRecursiveImpl( QueueHandle_t xMutex ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xMutex; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueGiveMutexRecursive( xInternalQueueHandle ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_QUEUE_SETS == 1 ) + + QueueSetMemberHandle_t MPU_xQueueSelectFromSetImpl( QueueSetHandle_t xQueueSet, + TickType_t xBlockTimeTicks ) PRIVILEGED_FUNCTION; + + QueueSetMemberHandle_t MPU_xQueueSelectFromSetImpl( QueueSetHandle_t xQueueSet, + TickType_t xBlockTimeTicks ) /* PRIVILEGED_FUNCTION */ + { + QueueSetHandle_t xInternalQueueSetHandle = NULL; + QueueSetMemberHandle_t xSelectedMemberInternal = NULL; + QueueSetMemberHandle_t xSelectedMemberExternal = NULL; + int32_t lIndexQueueSet, lIndexSelectedMember; + + lIndexQueueSet = ( int32_t ) xQueueSet; + + if( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE ) + { + xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) ); + + if( xInternalQueueSetHandle != NULL ) + { + xSelectedMemberInternal = xQueueSelectFromSet( xInternalQueueSetHandle, xBlockTimeTicks ); + + if( xSelectedMemberInternal != NULL ) + { + lIndexSelectedMember = MPU_GetIndexForQueueSetMemberHandle( xSelectedMemberInternal ); + + if( lIndexSelectedMember != -1 ) + { + xSelectedMemberExternal = ( QueueSetMemberHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lIndexSelectedMember ) ); + } + } + } + } + + return xSelectedMemberExternal; + } + + #endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t MPU_xQueueAddToSetImpl( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xQueueAddToSetImpl( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + QueueSetMemberHandle_t xInternalQueueSetMemberHandle = NULL; + QueueSetHandle_t xInternalQueueSetHandle; + int32_t lIndexQueueSet, lIndexQueueSetMember; + + lIndexQueueSet = ( int32_t ) xQueueSet; + lIndexQueueSetMember = ( int32_t ) xQueueOrSemaphore; + + if( ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE ) && + ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSetMember ) != pdFALSE ) ) + { + xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) ); + xInternalQueueSetMemberHandle = MPU_GetQueueSetMemberHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSetMember ) ); + + if( ( 
xInternalQueueSetHandle != NULL ) && ( xInternalQueueSetMemberHandle != NULL ) ) + { + xReturn = xQueueAddToSet( xInternalQueueSetMemberHandle, xInternalQueueSetHandle ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if configQUEUE_REGISTRY_SIZE > 0 + + void MPU_vQueueAddToRegistryImpl( QueueHandle_t xQueue, + const char * pcName ) PRIVILEGED_FUNCTION; + + void MPU_vQueueAddToRegistryImpl( QueueHandle_t xQueue, + const char * pcName ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + vQueueAddToRegistry( xInternalQueueHandle, pcName ); + } + } + } + + #endif /* if configQUEUE_REGISTRY_SIZE > 0 */ +/*-----------------------------------------------------------*/ + + #if configQUEUE_REGISTRY_SIZE > 0 + + void MPU_vQueueUnregisterQueueImpl( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + + void MPU_vQueueUnregisterQueueImpl( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + vQueueUnregisterQueue( xInternalQueueHandle ); + } + } + } + + #endif /* if configQUEUE_REGISTRY_SIZE > 0 */ +/*-----------------------------------------------------------*/ + + #if configQUEUE_REGISTRY_SIZE > 0 + + const char * MPU_pcQueueGetNameImpl( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION; + + const char * MPU_pcQueueGetNameImpl( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */ + { + const char * pcReturn = NULL; + QueueHandle_t xInternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + pcReturn = pcQueueGetName( xInternalQueueHandle ); + } + } + + return pcReturn; + } + + #endif /* if configQUEUE_REGISTRY_SIZE > 0 */ +/*-----------------------------------------------------------*/ + +/* Privileged only wrappers for Queue APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs.
*/ +/*-----------------------------------------------------------*/ + + void MPU_vQueueDelete( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */ + { + QueueHandle_t xInternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + vQueueDelete( xInternalQueueHandle ); + MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + } + } + } +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t MPU_xQueueCreateMutex( const uint8_t ucQueueType ) /* PRIVILEGED_FUNCTION */ + { + QueueHandle_t xInternalQueueHandle = NULL; + QueueHandle_t xExternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalQueueHandle = xQueueCreateMutex( ucQueueType ); + + if( xInternalQueueHandle != NULL ) + { + MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle ); + xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalQueueHandle; + } + + #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t MPU_xQueueCreateMutexStatic( const uint8_t ucQueueType, + StaticQueue_t * pxStaticQueue ) /* PRIVILEGED_FUNCTION */ + { + QueueHandle_t xInternalQueueHandle = NULL; + QueueHandle_t xExternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalQueueHandle = xQueueCreateMutexStatic( ucQueueType, pxStaticQueue ); + + if( xInternalQueueHandle != NULL ) + { + MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle ); + xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalQueueHandle; + } + + #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueHandle_t MPU_xQueueCreateCountingSemaphore( UBaseType_t uxCountValue, + UBaseType_t uxInitialCount ) /* PRIVILEGED_FUNCTION */ + { + QueueHandle_t xInternalQueueHandle = NULL; + QueueHandle_t xExternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalQueueHandle = xQueueCreateCountingSemaphore( uxCountValue, uxInitialCount ); + + if( xInternalQueueHandle != NULL ) + { + MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle ); + xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalQueueHandle; + } + + #endif /* if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + QueueHandle_t 
MPU_xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, + const UBaseType_t uxInitialCount, + StaticQueue_t * pxStaticQueue ) /* PRIVILEGED_FUNCTION */ + { + QueueHandle_t xInternalQueueHandle = NULL; + QueueHandle_t xExternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalQueueHandle = xQueueCreateCountingSemaphoreStatic( uxMaxCount, uxInitialCount, pxStaticQueue ); + + if( xInternalQueueHandle != NULL ) + { + MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle ); + xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalQueueHandle; + } + + #endif /* if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + QueueHandle_t MPU_xQueueGenericCreate( UBaseType_t uxQueueLength, + UBaseType_t uxItemSize, + uint8_t ucQueueType ) /* PRIVILEGED_FUNCTION */ + { + QueueHandle_t xInternalQueueHandle = NULL; + QueueHandle_t xExternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalQueueHandle = xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType ); + + if( xInternalQueueHandle != NULL ) + { + MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle ); + xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalQueueHandle; + } + + #endif /* if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + QueueHandle_t MPU_xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, + const UBaseType_t uxItemSize, + uint8_t * pucQueueStorage, + StaticQueue_t * pxStaticQueue, + const uint8_t ucQueueType ) /* PRIVILEGED_FUNCTION */ + { + QueueHandle_t xInternalQueueHandle = NULL; + QueueHandle_t xExternalQueueHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalQueueHandle = xQueueGenericCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxStaticQueue, ucQueueType ); + + if( xInternalQueueHandle != NULL ) + { + MPU_StoreQueueHandleAtIndex( lIndex, xInternalQueueHandle ); + xExternalQueueHandle = ( QueueHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalQueueHandle; + } + + #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueGenericReset( QueueHandle_t xQueue, + BaseType_t xNewQueue ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + BaseType_t xReturn = pdFAIL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueGenericReset( xInternalQueueHandle, xNewQueue ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + QueueSetHandle_t
MPU_xQueueCreateSet( UBaseType_t uxEventQueueLength ) /* PRIVILEGED_FUNCTION */ + { + QueueSetHandle_t xInternalQueueSetHandle = NULL; + QueueSetHandle_t xExternalQueueSetHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalQueueSetHandle = xQueueCreateSet( uxEventQueueLength ); + + if( xInternalQueueSetHandle != NULL ) + { + MPU_StoreQueueSetHandleAtIndex( lIndex, xInternalQueueSetHandle ); + xExternalQueueSetHandle = ( QueueSetHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalQueueSetHandle; + } + + #endif /* if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_QUEUE_SETS == 1 ) + + BaseType_t MPU_xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + QueueSetMemberHandle_t xInternalQueueSetMemberHandle = NULL; + QueueSetHandle_t xInternalQueueSetHandle; + int32_t lIndexQueueSet, lIndexQueueSetMember; + + lIndexQueueSet = ( int32_t ) xQueueSet; + lIndexQueueSetMember = ( int32_t ) xQueueOrSemaphore; + + if( ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE ) && + ( IS_EXTERNAL_INDEX_VALID( lIndexQueueSetMember ) != pdFALSE ) ) + { + xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) ); + xInternalQueueSetMemberHandle = MPU_GetQueueSetMemberHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSetMember ) ); + + if( ( xInternalQueueSetHandle != NULL ) && ( xInternalQueueSetMemberHandle != NULL ) ) + { + xReturn = xQueueRemoveFromSet( xInternalQueueSetMemberHandle, xInternalQueueSetHandle ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + BaseType_t MPU_xQueueGenericGetStaticBuffers( QueueHandle_t xQueue, + uint8_t ** ppucQueueStorage, + StaticQueue_t ** ppxStaticQueue ) /* PRIVILEGED_FUNCTION */ + { + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + BaseType_t xReturn = pdFALSE; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueGenericGetStaticBuffers( xInternalQueueHandle, ppucQueueStorage, ppxStaticQueue ); + } + } + + return xReturn; + } + + #endif /*if ( configSUPPORT_STATIC_ALLOCATION == 1 )*/ +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueGenericSendFromISR( QueueHandle_t xQueue, + const void * const pvItemToQueue, + BaseType_t * const pxHigherPriorityTaskWoken, + const BaseType_t xCopyPosition ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueGenericSendFromISR( xInternalQueueHandle, pvItemToQueue, pxHigherPriorityTaskWoken, xCopyPosition ); + } + } + + return xReturn; + } + +/*-----------------------------------------------------------*/ + + 
BaseType_t MPU_xQueueGiveFromISR( QueueHandle_t xQueue, + BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueGiveFromISR( xInternalQueueHandle, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } + +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueuePeekFromISR( QueueHandle_t xQueue, + void * const pvBuffer ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueuePeekFromISR( xInternalQueueHandle, pvBuffer ); + } + } + + return xReturn; + } + +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueReceiveFromISR( QueueHandle_t xQueue, + void * const pvBuffer, + BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueReceiveFromISR( xInternalQueueHandle, pvBuffer, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } + +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueIsQueueEmptyFromISR( xInternalQueueHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFAIL; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + xReturn = xQueueIsQueueFullFromISR( xInternalQueueHandle ); + } + } + + return xReturn; + } + +/*-----------------------------------------------------------*/ + + UBaseType_t MPU_uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t uxReturn = 0; + int32_t lIndex; + QueueHandle_t xInternalQueueHandle = NULL; + + lIndex = ( int32_t ) xQueue; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalQueueHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalQueueHandle != NULL ) + { + uxReturn = 
uxQueueMessagesWaitingFromISR( xInternalQueueHandle ); + } + } + + return uxReturn; + } + +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + + TaskHandle_t MPU_xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xMutexHolderTaskInternalHandle = NULL; + TaskHandle_t xMutexHolderTaskExternalHandle = NULL; + int32_t lIndex, lMutexHolderTaskIndex; + QueueHandle_t xInternalSemaphoreHandle = NULL; + + lIndex = ( int32_t ) xSemaphore; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalSemaphoreHandle = MPU_GetQueueHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalSemaphoreHandle != NULL ) + { + xMutexHolderTaskInternalHandle = xQueueGetMutexHolder( xInternalSemaphoreHandle ); + + if( xMutexHolderTaskInternalHandle != NULL ) + { + lMutexHolderTaskIndex = MPU_GetIndexForTaskHandle( xMutexHolderTaskInternalHandle ); + + if( lMutexHolderTaskIndex != -1 ) + { + xMutexHolderTaskExternalHandle = ( TaskHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lMutexHolderTaskIndex ) ); + } + } + } + } + + return xMutexHolderTaskExternalHandle; + } + + #endif /* #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_QUEUE_SETS == 1 ) + + QueueSetMemberHandle_t MPU_xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) /* PRIVILEGED_FUNCTION */ + { + QueueSetHandle_t xInternalQueueSetHandle = NULL; + QueueSetMemberHandle_t xSelectedMemberInternal = NULL; + QueueSetMemberHandle_t xSelectedMemberExternal = NULL; + int32_t lIndexQueueSet, lIndexSelectedMember; + + lIndexQueueSet = ( int32_t ) xQueueSet; + + if( IS_EXTERNAL_INDEX_VALID( lIndexQueueSet ) != pdFALSE ) + { + xInternalQueueSetHandle = MPU_GetQueueSetHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndexQueueSet ) ); + + if( xInternalQueueSetHandle != NULL ) + { + xSelectedMemberInternal = xQueueSelectFromSetFromISR( xInternalQueueSetHandle ); + + if( xSelectedMemberInternal != NULL ) + { + lIndexSelectedMember = MPU_GetIndexForQueueSetMemberHandle( xSelectedMemberInternal ); + + if( lIndexSelectedMember != -1 ) + { + xSelectedMemberExternal = ( QueueSetMemberHandle_t ) ( CONVERT_TO_EXTERNAL_INDEX( lIndexSelectedMember ) ); + } + } + } + } + + return xSelectedMemberExternal; + } + + #endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +/*-----------------------------------------------------------*/ +/* MPU wrappers for timers APIs. 
*/ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + void * MPU_pvTimerGetTimerIDImpl( const TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + + void * MPU_pvTimerGetTimerIDImpl( const TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */ + { + void * pvReturn = NULL; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + pvReturn = pvTimerGetTimerID( xInternalTimerHandle ); + } + } + + return pvReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + void MPU_vTimerSetTimerIDImpl( TimerHandle_t xTimer, + void * pvNewID ) PRIVILEGED_FUNCTION; + + void MPU_vTimerSetTimerIDImpl( TimerHandle_t xTimer, + void * pvNewID ) /* PRIVILEGED_FUNCTION */ + { + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + vTimerSetTimerID( xInternalTimerHandle, pvNewID ); + } + } + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + BaseType_t MPU_xTimerIsTimerActiveImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTimerIsTimerActiveImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + xReturn = xTimerIsTimerActive( xInternalTimerHandle ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandleImpl( void ) PRIVILEGED_FUNCTION; + + TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandleImpl( void ) /* PRIVILEGED_FUNCTION */ + { + TaskHandle_t xReturn; + + xReturn = xTimerGetTimerDaemonTaskHandle(); + + return xReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + BaseType_t MPU_xTimerGenericCommandImpl( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTimerGenericCommandImpl( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + BaseType_t xIsHigherPriorityTaskWokenWriteable = pdFALSE; + + if( pxHigherPriorityTaskWoken != NULL ) + { + xIsHigherPriorityTaskWokenWriteable = xPortIsAuthorizedToAccessBuffer( pxHigherPriorityTaskWoken, + sizeof( BaseType_t ), + tskMPU_WRITE_PERMISSION ); + } + + if( ( 
pxHigherPriorityTaskWoken == NULL ) || ( xIsHigherPriorityTaskWokenWriteable == pdTRUE ) ) + { + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + xReturn = xTimerGenericCommand( xInternalTimerHandle, xCommandID, xOptionalValue, pxHigherPriorityTaskWoken, xTicksToWait ); + } + } + } + + return xReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + const char * MPU_pcTimerGetNameImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + + const char * MPU_pcTimerGetNameImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */ + { + const char * pcReturn = NULL; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + pcReturn = pcTimerGetName( xInternalTimerHandle ); + } + } + + return pcReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + void MPU_vTimerSetReloadModeImpl( TimerHandle_t xTimer, + const UBaseType_t uxAutoReload ) PRIVILEGED_FUNCTION; + + void MPU_vTimerSetReloadModeImpl( TimerHandle_t xTimer, + const UBaseType_t uxAutoReload ) /* PRIVILEGED_FUNCTION */ + { + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + vTimerSetReloadMode( xInternalTimerHandle, uxAutoReload ); + } + } + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + BaseType_t MPU_xTimerGetReloadModeImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xTimerGetReloadModeImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + xReturn = xTimerGetReloadMode( xInternalTimerHandle ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + UBaseType_t MPU_uxTimerGetReloadModeImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxTimerGetReloadModeImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t uxReturn = 0; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + uxReturn = uxTimerGetReloadMode( xInternalTimerHandle ); + } + } + + return uxReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ 
+ + #if ( configUSE_TIMERS == 1 ) + + TickType_t MPU_xTimerGetPeriodImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + + TickType_t MPU_xTimerGetPeriodImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */ + { + TickType_t xReturn = 0; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + xReturn = xTimerGetPeriod( xInternalTimerHandle ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TIMERS == 1 ) + + TickType_t MPU_xTimerGetExpiryTimeImpl( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION; + + TickType_t MPU_xTimerGetExpiryTimeImpl( TimerHandle_t xTimer ) /* PRIVILEGED_FUNCTION */ + { + TickType_t xReturn = 0; + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + xReturn = xTimerGetExpiryTime( xInternalTimerHandle ); + } + } + + return xReturn; + } + + #endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +/* Privileged only wrappers for Timer APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) + + TimerHandle_t MPU_xTimerCreate( const char * const pcTimerName, + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction ) /* PRIVILEGED_FUNCTION */ + { + TimerHandle_t xInternalTimerHandle = NULL; + TimerHandle_t xExternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalTimerHandle = xTimerCreate( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, MPU_TimerCallback ); + + if( xInternalTimerHandle != NULL ) + { + MPU_StoreTimerHandleAtIndex( lIndex, xInternalTimerHandle, pxCallbackFunction ); + xExternalTimerHandle = ( TimerHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalTimerHandle; + } + + #endif /* if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) + + TimerHandle_t MPU_xTimerCreateStatic( const char * const pcTimerName, + const TickType_t xTimerPeriodInTicks, + const UBaseType_t uxAutoReload, + void * const pvTimerID, + TimerCallbackFunction_t pxCallbackFunction, + StaticTimer_t * pxTimerBuffer ) /* PRIVILEGED_FUNCTION */ + { + TimerHandle_t xInternalTimerHandle = NULL; + TimerHandle_t xExternalTimerHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalTimerHandle = xTimerCreateStatic( pcTimerName, xTimerPeriodInTicks, uxAutoReload, pvTimerID, MPU_TimerCallback, pxTimerBuffer ); + + if( xInternalTimerHandle != NULL ) + { + MPU_StoreTimerHandleAtIndex( 
lIndex, xInternalTimerHandle, pxCallbackFunction ); + xExternalTimerHandle = ( TimerHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalTimerHandle; + } + + #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) + + BaseType_t MPU_xTimerGetStaticBuffer( TimerHandle_t xTimer, + StaticTimer_t ** ppxTimerBuffer ) /* PRIVILEGED_FUNCTION */ + { + TimerHandle_t xInternalTimerHandle = NULL; + int32_t lIndex; + BaseType_t xReturn = pdFALSE; + + lIndex = ( int32_t ) xTimer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalTimerHandle = MPU_GetTimerHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalTimerHandle != NULL ) + { + xReturn = xTimerGetStaticBuffer( xInternalTimerHandle, ppxTimerBuffer ); + } + } + + return xReturn; + } + + #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +/*-----------------------------------------------------------*/ +/* MPU wrappers for event group APIs. */ +/*-----------------------------------------------------------*/ + + EventBits_t MPU_xEventGroupWaitBitsImpl( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + EventBits_t MPU_xEventGroupWaitBitsImpl( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + EventBits_t xReturn = 0; + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = xEventGroupWaitBits( xInternalEventGroupHandle, uxBitsToWaitFor, xClearOnExit, xWaitForAllBits, xTicksToWait ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + EventBits_t MPU_xEventGroupClearBitsImpl( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION; + + EventBits_t MPU_xEventGroupClearBitsImpl( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* PRIVILEGED_FUNCTION */ + { + EventBits_t xReturn = 0; + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = xEventGroupClearBits( xInternalEventGroupHandle, uxBitsToClear ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + EventBits_t MPU_xEventGroupSetBitsImpl( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) PRIVILEGED_FUNCTION; + + EventBits_t MPU_xEventGroupSetBitsImpl( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* PRIVILEGED_FUNCTION */ + { + EventBits_t xReturn = 0; + EventGroupHandle_t xInternalEventGroupHandle = NULL; + 
int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = xEventGroupSetBits( xInternalEventGroupHandle, uxBitsToSet ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + EventBits_t MPU_xEventGroupSyncImpl( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + EventBits_t MPU_xEventGroupSyncImpl( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + EventBits_t xReturn; + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = xEventGroupSync( xInternalEventGroupHandle, uxBitsToSet, uxBitsToWaitFor, xTicksToWait ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + #if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t MPU_uxEventGroupGetNumberImpl( void * xEventGroup ) PRIVILEGED_FUNCTION; + + UBaseType_t MPU_uxEventGroupGetNumberImpl( void * xEventGroup ) /* PRIVILEGED_FUNCTION */ + { + UBaseType_t xReturn = 0; + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = uxEventGroupGetNumber( xInternalEventGroupHandle ); + } + } + + return xReturn; + } + + #endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + + #if ( configUSE_TRACE_FACILITY == 1 ) + + void MPU_vEventGroupSetNumberImpl( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) PRIVILEGED_FUNCTION; + + void MPU_vEventGroupSetNumberImpl( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* PRIVILEGED_FUNCTION */ + { + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + vEventGroupSetNumber( xInternalEventGroupHandle, uxEventGroupNumber ); + } + } + } + + #endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +/* Privileged only wrappers for Event Group APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. 
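+ * Creation reserves a slot in that pool and hands the slot index back to the
+ * application as the opaque handle, so the real EventGroupHandle_t never
+ * leaves mpu_wrappers.c.  Assuming the usual mpu_wrappers.h name mapping,
+ * application code keeps its normal shape, e.g.
+ *
+ *     EventGroupHandle_t xGroup = xEventGroupCreate();
+ *     xEventGroupSetBits( xGroup, ( EventBits_t ) ( 1U << 0 ) );
+ *
+ * is routed into MPU_xEventGroupCreate() and MPU_xEventGroupSetBits(), with
+ * xGroup carrying only the pool index.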
*/ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + EventGroupHandle_t MPU_xEventGroupCreate( void ) /* PRIVILEGED_FUNCTION */ + { + EventGroupHandle_t xInternalEventGroupHandle = NULL; + EventGroupHandle_t xExternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalEventGroupHandle = xEventGroupCreate(); + + if( xInternalEventGroupHandle != NULL ) + { + MPU_StoreEventGroupHandleAtIndex( lIndex, xInternalEventGroupHandle ); + xExternalEventGroupHandle = ( EventGroupHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalEventGroupHandle; + } + + #endif /* if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + EventGroupHandle_t MPU_xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer ) /* PRIVILEGED_FUNCTION */ + { + EventGroupHandle_t xInternalEventGroupHandle = NULL; + EventGroupHandle_t xExternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalEventGroupHandle = xEventGroupCreateStatic( pxEventGroupBuffer ); + + if( xInternalEventGroupHandle != NULL ) + { + MPU_StoreEventGroupHandleAtIndex( lIndex, xInternalEventGroupHandle ); + xExternalEventGroupHandle = ( EventGroupHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + + return xExternalEventGroupHandle; + } + + #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + + void MPU_vEventGroupDelete( EventGroupHandle_t xEventGroup ) /* PRIVILEGED_FUNCTION */ + { + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + vEventGroupDelete( xInternalEventGroupHandle ); + MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + } + } + } +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + BaseType_t MPU_xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup, + StaticEventGroup_t ** ppxEventGroupBuffer ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = xEventGroupGetStaticBuffer( xInternalEventGroupHandle, ppxEventGroupBuffer ); + } + } + + return xReturn; + } + + #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) + + BaseType_t MPU_xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + EventGroupHandle_t 
xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = xEventGroupClearBitsFromISR( xInternalEventGroupHandle, uxBitsToClear ); + } + } + + return xReturn; + } + + #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) */ +/*-----------------------------------------------------------*/ + + #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) + + BaseType_t MPU_xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = xEventGroupSetBitsFromISR( xInternalEventGroupHandle, uxBitsToSet, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } + + #endif /* #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) */ +/*-----------------------------------------------------------*/ + + EventBits_t MPU_xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) /* PRIVILEGED_FUNCTION */ + { + EventBits_t xReturn = 0; + EventGroupHandle_t xInternalEventGroupHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xEventGroup; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalEventGroupHandle = MPU_GetEventGroupHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalEventGroupHandle != NULL ) + { + xReturn = xEventGroupGetBitsFromISR( xInternalEventGroupHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + +/*-----------------------------------------------------------*/ +/* MPU wrappers for stream buffer APIs. 
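+ * Unlike the event group wrappers above, the send and receive paths here also
+ * verify, before the usual index-to-handle translation, that the calling task
+ * is authorized to access the user supplied data buffer, e.g.
+ *
+ *     xIsTxDataBufferReadable = xPortIsAuthorizedToAccessBuffer( pvTxData,
+ *                                                                xDataLengthBytes,
+ *                                                                tskMPU_READ_PERMISSION );
+ *
+ * and only call xStreamBufferSend()/xStreamBufferReceive() when that check
+ * passes, so a task cannot use the kernel to copy data in or out of memory
+ * its MPU regions do not grant it.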
*/ +/*-----------------------------------------------------------*/ + + size_t MPU_xStreamBufferSendImpl( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + size_t MPU_xStreamBufferSendImpl( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + size_t xReturn = 0; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + BaseType_t xIsTxDataBufferReadable = pdFALSE; + + xIsTxDataBufferReadable = xPortIsAuthorizedToAccessBuffer( pvTxData, + xDataLengthBytes, + tskMPU_READ_PERMISSION ); + + if( xIsTxDataBufferReadable == pdTRUE ) + { + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferSend( xInternalStreamBufferHandle, pvTxData, xDataLengthBytes, xTicksToWait ); + } + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + size_t MPU_xStreamBufferReceiveImpl( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + + size_t MPU_xStreamBufferReceiveImpl( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* PRIVILEGED_FUNCTION */ + { + size_t xReturn = 0; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + BaseType_t xIsRxDataBufferWriteable = pdFALSE; + + xIsRxDataBufferWriteable = xPortIsAuthorizedToAccessBuffer( pvRxData, + xBufferLengthBytes, + tskMPU_WRITE_PERMISSION ); + + if( xIsRxDataBufferWriteable == pdTRUE ) + { + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferReceive( xInternalStreamBufferHandle, pvRxData, xBufferLengthBytes, xTicksToWait ); + } + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xStreamBufferIsFullImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xStreamBufferIsFullImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferIsFull( xInternalStreamBufferHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xStreamBufferIsEmptyImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xStreamBufferIsEmptyImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = 
MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferIsEmpty( xInternalStreamBufferHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + size_t MPU_xStreamBufferSpacesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + + size_t MPU_xStreamBufferSpacesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */ + { + size_t xReturn = 0; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferSpacesAvailable( xInternalStreamBufferHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + size_t MPU_xStreamBufferBytesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + + size_t MPU_xStreamBufferBytesAvailableImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */ + { + size_t xReturn = 0; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferBytesAvailable( xInternalStreamBufferHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xStreamBufferSetTriggerLevelImpl( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) PRIVILEGED_FUNCTION; + + BaseType_t MPU_xStreamBufferSetTriggerLevelImpl( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferSetTriggerLevel( xInternalStreamBufferHandle, xTriggerLevel ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + size_t MPU_xStreamBufferNextMessageLengthBytesImpl( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION; + + size_t MPU_xStreamBufferNextMessageLengthBytesImpl( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */ + { + size_t xReturn = 0; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferNextMessageLengthBytes( xInternalStreamBufferHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + +/* Privileged only wrappers for Stream Buffer APIs. These are needed so that + * the application can use opaque handles maintained in mpu_wrappers.c + * with all the APIs. 
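+ * The create/delete pair below follows the same pool protocol used for the
+ * other kernel objects in this file:
+ *
+ *     lIndex = MPU_GetFreeIndexInKernelObjectPool();   reserve a slot (-1 when none is free)
+ *     MPU_StoreStreamBufferHandleAtIndex( lIndex, xInternalStreamBufferHandle );
+ *     return ( StreamBufferHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex );
+ *
+ * with MPU_SetIndexFreeInKernelObjectPool() releasing the slot again if
+ * creation fails or when the stream buffer is deleted.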
*/ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + StreamBufferHandle_t MPU_xStreamBufferGenericCreate( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer, + StreamBufferCallbackFunction_t pxSendCompletedCallback, + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) /* PRIVILEGED_FUNCTION */ + { + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + StreamBufferHandle_t xExternalStreamBufferHandle = NULL; + int32_t lIndex; + + /** + * Stream buffer application level callback functionality is disabled for MPU + * enabled ports. + */ + configASSERT( ( pxSendCompletedCallback == NULL ) && + ( pxReceiveCompletedCallback == NULL ) ); + + if( ( pxSendCompletedCallback == NULL ) && + ( pxReceiveCompletedCallback == NULL ) ) + { + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalStreamBufferHandle = xStreamBufferGenericCreate( xBufferSizeBytes, + xTriggerLevelBytes, + xIsMessageBuffer, + NULL, + NULL ); + + if( xInternalStreamBufferHandle != NULL ) + { + MPU_StoreStreamBufferHandleAtIndex( lIndex, xInternalStreamBufferHandle ); + xExternalStreamBufferHandle = ( StreamBufferHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + } + else + { + traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ); + xExternalStreamBufferHandle = NULL; + } + + return xExternalStreamBufferHandle; + } + + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + StreamBufferHandle_t MPU_xStreamBufferGenericCreateStatic( size_t xBufferSizeBytes, + size_t xTriggerLevelBytes, + BaseType_t xIsMessageBuffer, + uint8_t * const pucStreamBufferStorageArea, + StaticStreamBuffer_t * const pxStaticStreamBuffer, + StreamBufferCallbackFunction_t pxSendCompletedCallback, + StreamBufferCallbackFunction_t pxReceiveCompletedCallback ) /* PRIVILEGED_FUNCTION */ + { + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + StreamBufferHandle_t xExternalStreamBufferHandle = NULL; + int32_t lIndex; + + /** + * Stream buffer application level callback functionality is disabled for MPU + * enabled ports. 
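+     * A user supplied completion callback would execute with kernel privilege
+     * from inside these wrappers, so the configASSERT() below accepts nothing
+     * other than NULL, and NULL is what is forwarded to
+     * xStreamBufferGenericCreateStatic().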
+ */ + configASSERT( ( pxSendCompletedCallback == NULL ) && + ( pxReceiveCompletedCallback == NULL ) ); + + if( ( pxSendCompletedCallback == NULL ) && + ( pxReceiveCompletedCallback == NULL ) ) + { + lIndex = MPU_GetFreeIndexInKernelObjectPool(); + + if( lIndex != -1 ) + { + xInternalStreamBufferHandle = xStreamBufferGenericCreateStatic( xBufferSizeBytes, + xTriggerLevelBytes, + xIsMessageBuffer, + pucStreamBufferStorageArea, + pxStaticStreamBuffer, + NULL, + NULL ); + + if( xInternalStreamBufferHandle != NULL ) + { + MPU_StoreStreamBufferHandleAtIndex( lIndex, xInternalStreamBufferHandle ); + xExternalStreamBufferHandle = ( StreamBufferHandle_t ) CONVERT_TO_EXTERNAL_INDEX( lIndex ); + } + else + { + MPU_SetIndexFreeInKernelObjectPool( lIndex ); + } + } + } + else + { + traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer ); + xExternalStreamBufferHandle = NULL; + } + + return xExternalStreamBufferHandle; + } + + #endif /* configSUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + + void MPU_vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */ + { + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + vStreamBufferDelete( xInternalStreamBufferHandle ); + } + + MPU_SetIndexFreeInKernelObjectPool( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + } + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferReset( xInternalStreamBufferHandle ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + BaseType_t MPU_xStreamBufferGetStaticBuffers( StreamBufferHandle_t xStreamBuffers, + uint8_t * ppucStreamBufferStorageArea, + StaticStreamBuffer_t * ppxStaticStreamBuffer ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffers; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = MPU_xStreamBufferGetStaticBuffers( xInternalStreamBufferHandle, ppucStreamBufferStorageArea, ppxStaticStreamBuffer ); + } + } + + return xReturn; + } + + #endif /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + + size_t MPU_xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + size_t xReturn = 0; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( 
int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferSendFromISR( xInternalStreamBufferHandle, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + size_t MPU_xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + BaseType_t * const pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + size_t xReturn = 0; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferReceiveFromISR( xInternalStreamBufferHandle, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer, + BaseType_t * pxHigherPriorityTaskWoken ) /* PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferSendCompletedFromISR( xInternalStreamBufferHandle, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } +/*-----------------------------------------------------------*/ + + BaseType_t MPU_xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer, + BaseType_t * pxHigherPriorityTaskWoken ) /*PRIVILEGED_FUNCTION */ + { + BaseType_t xReturn = pdFALSE; + StreamBufferHandle_t xInternalStreamBufferHandle = NULL; + int32_t lIndex; + + lIndex = ( int32_t ) xStreamBuffer; + + if( IS_EXTERNAL_INDEX_VALID( lIndex ) != pdFALSE ) + { + xInternalStreamBufferHandle = MPU_GetStreamBufferHandleAtIndex( CONVERT_TO_INTERNAL_INDEX( lIndex ) ); + + if( xInternalStreamBufferHandle != NULL ) + { + xReturn = xStreamBufferReceiveCompletedFromISR( xInternalStreamBufferHandle, pxHigherPriorityTaskWoken ); + } + } + + return xReturn; + } + +/*-----------------------------------------------------------*/ + +/* Functions that the application writer wants to execute in privileged mode + * can be defined in application_defined_privileged_functions.h. */ + + #if configINCLUDE_APPLICATION_DEFINED_PRIVILEGED_FUNCTIONS == 1 + #include "application_defined_privileged_functions.h" + #endif +/*-----------------------------------------------------------*/ + +#endif /* #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */ +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..a1e5ce0828f --- /dev/null +++ b/portable/GCC/ARM_CM23/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2419 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. */ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" 
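+            /* Unprivileged path: the first SVC (portSVC_SYSTEM_CALL_ENTER) raised the
+             * task to privileged mode before MPU_xTaskAbortDelayImpl was reached; the
+             * SVC below (portSVC_SYSTEM_CALL_EXIT) is expected to drop it back to its
+             * original unprivileged state before bx lr returns to the caller. */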
+ " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" 
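+            /* CONTROL bit 0 (nPRIV) is set when the caller runs unprivileged: branch to
+             * the *_Unpriv label and enter the kernel through the system call SVCs,
+             * otherwise fall through and branch straight to the privileged Impl
+             * function. */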
+ " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 
\n" + " tst r0, r1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0, r1} 
\n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " 
\n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( 
INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " 
MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " 
MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( 
portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " 
\n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( 
portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTimerSetTimerIDImpl \n" 
+ " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ 
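
Every MPU_* wrapper in this file follows the same shape: it inspects the nPRIV bit of the CONTROL register and either tail-calls the privileged implementation directly or brackets the call with the system-call enter/exit SVCs. The C sketch below is only an illustration of that control flow and is not part of the patch; the real wrappers must remain naked assembly so that the arguments in r0-r3 (and any stacked argument) reach the *Impl function untouched. MPU_xExampleCall and MPU_xExampleCallImpl are placeholder names used here only for illustration.

    /* Illustrative sketch only. Assumes FreeRTOS types (BaseType_t) and the
     * portSVC_SYSTEM_CALL_ENTER / portSVC_SYSTEM_CALL_EXIT constants shown in
     * the wrappers above; the function names are hypothetical. */
    BaseType_t MPU_xExampleCall( void * pvParam )
    {
        uint32_t ulControl;

        /* Read CONTROL; bit 0 (nPRIV) is set when the caller is unprivileged. */
        __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

        if( ( ulControl & 0x1UL ) == 0UL )
        {
            /* Privileged caller - branch straight to the implementation,
             * equivalent to the "b MPU_xExampleCallImpl" in the Priv path. */
            return MPU_xExampleCallImpl( pvParam );
        }
        else
        {
            BaseType_t xReturn;

            /* Unprivileged caller - the first SVC lands in vSystemCallEnter(),
             * which switches to the system call stack and raises privilege;
             * the second SVC lands in vSystemCallExit(), which restores the
             * task stack and drops privilege again. */
            __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_ENTER ) : "memory" );
            xReturn = MPU_xExampleCallImpl( pvParam );
            __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );

            return xReturn;
        }
    }

Wrappers whose target takes a fifth, stacked argument (for example MPU_xTaskGenericNotify above) use portSVC_SYSTEM_CALL_ENTER_1 instead, so that vSystemCallEnter_1() also copies that stacked argument onto the system call stack; MPU_xTimerGenericCommand additionally checks IPSR so that invocations from an interrupt always take the privileged path.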
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " 
svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* 
__attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( 
configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, 
control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t 
xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM23/non_secure/port.c b/portable/GCC/ARM_CM23/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM23/non_secure/port.c +++ b/portable/GCC/ARM_CM23/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. 
*/ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. 
+ * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
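+             * Clearing the nPRIV bit of CONTROL switches the processor to
+             * privileged thread mode, so the wrapped kernel function runs with
+             * full privileges; vSystemCallExit() sets the bit again before
+             * control returns to the task.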
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
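+             * Setting the nPRIV bit returns the processor to unprivileged
+             * thread mode, so the task resumes at its normal, restricted
+             * privilege level once the exception return completes.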
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
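+         * The masking below rounds the pointer down to an 8-byte boundary.
+         * For example, with purely illustrative addresses, masking 0x20000F34
+         * with ~portBYTE_ALIGNMENT_MASK ( ~0x7UL for this port's 8-byte
+         * alignment ) yields 0x20000F30.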
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
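+                     * A region is enabled when the EN bit ( bit 0 ) of its RLAR
+                     * is set; disabled regions cannot grant access and are
+                     * skipped. The checks below then require both the start and
+                     * the end of the buffer to lie inside the region, and the
+                     * region's access permissions to cover the requested access.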
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM23/non_secure/portasm.c b/portable/GCC/ARM_CM23/non_secure/portasm.c index 44f159af1fa..64a24f527ff 100644 --- a/portable/GCC/ARM_CM23/non_secure/portasm.c +++ b/portable/GCC/ARM_CM23/non_secure/portasm.c @@ -44,6 +44,109 @@ #error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0. #endif +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. 
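+     * MPU regions 4 to 7 hold the four configurable per-task regions taken
+     * from the TCB; once all four are programmed the MPU is re-enabled and a
+     * DSB is issued before execution continues.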
*/ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " subs r2, #20 \n" + " ldmia r2!, {r0, r3-r6} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + " subs r2, #20 \n" + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " mov lr, r6 \n" + " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " \n" + " restore_general_regs_first_task: \n" + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r2, #48 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r2, #16 \n" + " \n" + " restore_context_done_first_task: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xSecureContextConst2: .word xSecureContext \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -54,83 +157,24 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " bics r4, r5 \n"/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. 
*/ - " movs r5, #4 \n"/* r5 = 4. */ - " str r5, [r2] \n"/* Program RNR = 4. */ - " ldmia r3!, {r6,r7} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r5, #5 \n"/* r5 = 5. */ - " str r5, [r2] \n"/* Program RNR = 5. */ - " ldmia r3!, {r6,r7} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r5, #6 \n"/* r5 = 6. */ - " str r5, [r2] \n"/* Program RNR = 6. */ - " ldmia r3!, {r6,r7} \n"/* Read third set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r5, #7 \n"/* r5 = 7. */ - " str r5, [r2] \n"/* Program RNR = 7. */ - " ldmia r3!, {r6,r7} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r4, xRBARConst2 \n"/* r4 = 0xe000ed9c [Location of RBAR]. */ - " stmia r4!, {r6,r7} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " orrs r4, r5 \n"/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - " ldr r5, xSecureContextConst2 \n" - " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " msr control, r3 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r4 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ - " ldr r4, xSecureContextConst2 \n" - " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " bx r3 \n"/* Finally, branch to EXC_RETURN. 
*/ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" "xSecureContextConst2: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -237,6 +281,167 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/ + " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */ + " \n" + " cbz r0, save_ns_context \n" /* No secure context to save. */ + " save_s_context: \n" + " push {r0-r2, lr} \n" + " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r3} \n" /* LR is now in r3. */ + " mov lr, r3 \n" /* Restore LR. */ + " \n" + " save_ns_context: \n" + " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */ + " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " save_general_regs: \n" + " mrs r3, psp \n" + " stmia r2!, {r4-r7} \n" /* Store r4-r7. */ + " mov r4, r8 \n" /* r4 = r8. */ + " mov r5, r9 \n" /* r5 = r9. */ + " mov r6, r10 \n" /* r6 = r10. */ + " mov r7, r11 \n" /* r7 = r11. */ + " stmia r2!, {r4-r7} \n" /* Store r8-r11. */ + " ldmia r3!, {r4-r7} \n" /* Copy half of the hardware saved context into r4-r7. */ + " stmia r2!, {r4-r7} \n" /* Store the hardware saved context. */ + " ldmia r3!, {r4-r7} \n" /* Copy rest half of the hardware saved context into r4-r7. */ + " stmia r2!, {r4-r7} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psp \n" /* r3 = PSP. */ + " mrs r4, psplim \n" /* r4 = PSPLIM. */ + " mrs r5, control \n" /* r5 = CONTROL. */ + " mov r6, lr \n" /* r6 = LR. */ + " stmia r2!, {r0, r3-r6} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " cpsid i \n" + " bl vTaskSwitchContext \n" + " cpsie i \n" + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. 
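+     * The DMB ensures that all outstanding memory accesses complete under the
+     * old region settings before MPU_CTRL is cleared; a matching DSB follows
+     * once the new per-task regions are programmed and the MPU is re-enabled.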
*/ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " subs r2, #20 \n" + " ldmia r2!, {r0, r3-r6} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + " subs r2, #20 \n" + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " mov lr, r6 \n" + " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " cbz r0, restore_ns_context \n" /* No secure context to restore. */ + " \n" + " restore_s_context: \n" + " push {r1-r3, lr} \n" + " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r1-r4} \n" /* LR is now in r4. */ + " mov lr, r4 \n" + " \n" + " restore_ns_context: \n" + " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */ + " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. 
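+     * Shifting left by 25 places bit[6] of EXC_RETURN into bit[31], the sign
+     * bit, so the bmi that follows can branch on it directly.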
*/ + " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " restore_general_regs: \n" + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r2!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r3!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r2, #48 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r2, #32 \n" + " ldmia r2!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r2, #16 \n" + " \n" + " restore_context_done: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xSecureContextConst: .word xSecureContext \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -260,52 +465,26 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " bpl save_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " b select_next_task \n" " \n" " save_ns_context: \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ - " stmia r2!, {r4-r7} \n"/* Store the low registers that are not saved automatically. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. 
*/ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " subs r2, r2, #48 \n"/* r2 = r2 - 48. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. */ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3-r7} \n"/* Store xSecureContext, PSPLIM, LR and the low registers that are not saved automatically. */ + " mov r4, r8 \n"/* r4 = r8. */ + " mov r5, r9 \n"/* r5 = r9. */ + " mov r6, r10 \n"/* r6 = r10. */ + " mov r7, r11 \n"/* r7 = r11. */ + " stmia r2!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ " \n" " select_next_task: \n" " cpsid i \n" @@ -316,85 +495,22 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " bics r4, r5 \n"/* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - " str r4, [r3] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r3] \n"/* Program MAIR0. */ - " ldr r4, xRNRConst \n"/* r4 = 0xe000ed98 [Location of RNR]. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " movs r5, #4 \n"/* r5 = 4. */ - " str r5, [r4] \n"/* Program RNR = 4. */ - " ldmia r1!, {r6,r7} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r5, #5 \n"/* r5 = 5. */ - " str r5, [r4] \n"/* Program RNR = 5. */ - " ldmia r1!, {r6,r7} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r5, #6 \n"/* r5 = 6. */ - " str r5, [r4] \n"/* Program RNR = 6. */ - " ldmia r1!, {r6,r7} \n"/* Read third set of RBAR/RLAR from TCB. 
*/ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r5, #7 \n"/* r5 = 7. */ - " str r5, [r4] \n"/* Program RNR = 7. */ - " ldmia r1!, {r6,r7} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r6,r7} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " movs r5, #1 \n"/* r5 = 1. */ - " orrs r4, r5 \n"/* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - " str r4, [r3] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #else /* configENABLE_MPU */ - " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #endif /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. 
*/ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" " \n" " restore_ns_context: \n" " adds r2, r2, #16 \n"/* Move to the high registers. */ @@ -411,16 +527,62 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" "xSecureContextConst: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "movs r0, #4 \n" + "mov r1, lr \n" + "tst r0, r1 \n" + "beq stack_on_msp \n" + "stack_on_psp: \n" + " mrs r0, psp \n" + " b route_svc \n" + "stack_on_msp: \n" + " mrs r0, msp \n" + " b route_svc \n" + " \n" + "route_svc: \n" + " ldr r2, [r0, #24] \n" + " subs r2, #2 \n" + " ldrb r3, [r2, #0] \n" + " cmp r3, %0 \n" + " beq system_call_enter \n" + " cmp r3, %1 \n" + " beq system_call_enter_1 \n" + " cmp r3, %2 \n" + " beq system_call_exit \n" + " b vPortSVCHandler_C \n" + " \n" + "system_call_enter: \n" + " b vSystemCallEnter \n" + "system_call_enter_1: \n" + " b vSystemCallEnter_1 \n" + "system_call_exit: \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. 
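+         * The "i" inputs below expand to the portSVC_SYSTEM_CALL_* immediates
+         * compared against the SVC number recovered above: the stacked PC at
+         * offset 24 in the exception frame points just past the 16-bit SVC
+         * instruction, so the byte at ( PC - 2 ) is the SVC immediate. */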
*/ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "r3", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -443,6 +605,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ diff --git a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM23/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
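+                                        * The fifth parameter is passed on the
+                                        * stack, so this variant is routed to
+                                        * vSystemCallEnter_1(), which also copies
+                                        * that stacked parameter across to the
+                                        * system call stack.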
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..a1e5ce0828f --- /dev/null +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2419 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, 
control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) 
__attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr 
\n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( 
configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t 
MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne 
MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( 
configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( 
TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0, r1} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b 
MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, 
#1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : 
"memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne 
MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " 
.syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0, r1} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b 
MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0, r1} 
\n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
+/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0, r1} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t 
MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0, r1} \n" + " mrs r0, control \n" + " movs r1, #1 \n" + " tst r0, r1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0, r1} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0, r1} \n" + " svc %0 \n" + " bl 
MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. 
+ */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
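The split between vSystemCallEnter and vSystemCallEnter_1 mirrors the AAPCS calling convention: the first four arguments of a system call travel in r0-r3 and are already part of the hardware-stacked exception frame, whereas a fifth argument is passed on the task stack and therefore has to be copied across to the system call stack as well. As a concrete illustration taken from the wrappers earlier in this patch, compare the two prototypes below; the first enters through portSVC_SYSTEM_CALL_ENTER, the second through portSVC_SYSTEM_CALL_ENTER_1.

    /* Four or fewer parameters - everything is in r0-r3, handled by
     * vSystemCallEnter: */
    EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup,
                                        const EventBits_t uxBitsToSet );

    /* Five parameters - xTicksToWait is passed on the task stack, so
     * vSystemCallEnter_1 copies that extra word (plus one word of padding to
     * keep the stack double word aligned) onto the system call stack: */
    EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
                                         const EventBits_t uxBitsToWaitFor,
                                         const BaseType_t xClearOnExit,
                                         const BaseType_t xWaitForAllBits,
                                         TickType_t xTicksToWait );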
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
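A short note on how vPortSVCHandler_C recovers the SVC number above: the stacked PC is the return address, i.e. the address of the instruction after the svc, and the Thumb encoding of svc keeps its 8-bit immediate in the low byte of the 16-bit instruction, so the byte two bytes behind the return address is the number that was written as "svc #n". Sketched in C, with illustrative variable names:

    /* Stacked exception frame: r0, r1, r2, r3, r12, LR, PC, xPSR - hence
     * portOFFSET_TO_LR = 5, portOFFSET_TO_PC = 6 and portOFFSET_TO_PSR = 7. */
    uint32_t ulReturnAddress = pulCallerStackAddress[ portOFFSET_TO_PC ];

    /* The halfword at ( ulReturnAddress - 2 ) is the svc instruction itself;
     * its low byte (stored first, little endian) is the immediate. */
    uint8_t ucNumber = ( ( uint8_t * ) ulReturnAddress )[ -2 ];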
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
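The inline assembly at the end of the two enter paths and at the end of vSystemCallExit is the only place the port toggles CONTROL.nPRIV around a system call: the enter path clears the bit so the implementation runs privileged, and the exit path sets it again before returning to the task. The same bit manipulation written as C helpers, purely as a sketch (the port keeps it as inline assembly executed from the SVC handler, which is also what makes the write legal when the caller is unprivileged):

    static inline void prvRaisePrivilegeSketch( void )
    {
        uint32_t ulControl;

        __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
        ulControl &= ~( 0x1UL );    /* Clear nPRIV - privileged thread mode. */
        __asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" );
    }

    static inline void prvDropPrivilegeSketch( void )
    {
        uint32_t ulControl;

        __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );
        ulControl |= 0x1UL;         /* Set nPRIV - unprivileged thread mode. */
        __asm volatile ( "msr control, %0" : : "r" ( ulControl ) : "memory" );
    }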
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
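With the v2 wrappers the task context is no longer kept on the task stack but in xMPU_SETTINGS.ulContext, and the order written by the MPU variant of pxPortInitialiseStack above is exactly the order the MPU-enabled PendSV handler later saves and restores. Below is a hypothetical struct view of that layout for this port (Cortex-M23, no FPU frame, non-TrustZone build so there is no xSecureContext slot); the struct does not exist anywhere in the code and is only meant to make the indices readable.

    typedef struct ContextSketch
    {
        uint32_t ulCalleeSaved[ 8 ];   /* r4-r11. */
        uint32_t ulHardwareFrame[ 8 ]; /* r0-r3, r12, LR, PC, xPSR - the frame the CPU stacks on exception entry. */
        uint32_t ulPSP;                /* Task stack pointer after the hardware frame was stacked. */
        uint32_t ulPSPLIM;             /* Task stack limit register value. */
        uint32_t ulCONTROL;            /* Carries the task's nPRIV setting. */
        uint32_t ulExcReturn;          /* EXC_RETURN value used when leaving the handler. */
    } ContextSketch_t;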
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c index 7fb7b5ade5d..b11b6e97c5e 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portasm.c @@ -44,6 +44,106 @@ #error Cortex-M23 does not have a Floating Point Unit (FPU) and therefore configENABLE_FPU must be set to 0. #endif +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. 
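The xPortIsAuthorizedToAccessBuffer() routine added above is what the v2 MPU wrappers rely on to vet task-supplied pointers before the kernel touches them: a privileged task passes unconditionally, otherwise the whole buffer must fall inside at least one enabled task region whose RBAR permissions cover the requested access. A hedged usage sketch follows; the function name and buffer are invented for illustration, while the routine's prototype and tskMPU_READ_PERMISSION (the same constant returned by prvGetRegionAccessPermissions() earlier) come from this patch series.

    BaseType_t xCopyFromTaskBufferSketch( const void * pvTaskBuffer,
                                          uint32_t ulLength )
    {
        BaseType_t xResult = pdFAIL;

        if( xPortIsAuthorizedToAccessBuffer( pvTaskBuffer,
                                             ulLength,
                                             tskMPU_READ_PERMISSION ) == pdTRUE )
        {
            /* The calling task is allowed to read ulLength bytes starting at
             * pvTaskBuffer, so it is safe to copy from it on the task's
             * behalf. */
            xResult = pdPASS;
        }

        return xResult;
    }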
*/ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " subs r1, #16 \n" + " ldmia r1!, {r2-r5} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */ + " subs r1, #16 \n" + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " mov lr, r5 \n" + " \n" + " restore_general_regs_first_task: \n" + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r1, #48 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r1, #16 \n" + " \n" + " restore_context_done_first_task: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -54,78 +154,21 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " bics r3, r4 \n"/* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " ldmia r1!, {r5,r6} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. 
*/ - " stmia r3!, {r5,r6} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r4, #5 \n"/* r4 = 5. */ - " str r4, [r2] \n"/* Program RNR = 5. */ - " ldmia r1!, {r5,r6} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r4, #6 \n"/* r4 = 6. */ - " str r4, [r2] \n"/* Program RNR = 6. */ - " ldmia r1!, {r5,r6} \n"/* Read third set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r4, #7 \n"/* r4 = 7. */ - " str r4, [r2] \n"/* Program RNR = 7. */ - " ldmia r1!, {r5,r6} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst2 \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " orrs r3, r4 \n"/* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " msr control, r2 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " bx r2 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " bx r2 \n"/* Finally, branch to EXC_RETURN. */ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -232,6 +275,136 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. 
&( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */ + " mrs r2, psp \n" /* r2 = PSP. */ + " \n" + " save_general_regs: \n" + " stmia r1!, {r4-r7} \n" /* Store r4-r7. */ + " mov r4, r8 \n" /* r4 = r8. */ + " mov r5, r9 \n" /* r5 = r9. */ + " mov r6, r10 \n" /* r6 = r10. */ + " mov r7, r11 \n" /* r7 = r11. */ + " stmia r1!, {r4-r7} \n" /* Store r8-r11. */ + " ldmia r2!, {r4-r7} \n" /* Copy half of the hardware saved context into r4-r7. */ + " stmia r1!, {r4-r7} \n" /* Store the hardware saved context. */ + " ldmia r2!, {r4-r7} \n" /* Copy rest half of the hardware saved context into r4-r7. */ + " stmia r1!, {r4-r7} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r2, psp \n" /* r2 = PSP. */ + " mrs r3, psplim \n" /* r3 = PSPLIM. */ + " mrs r4, control \n" /* r4 = CONTROL. */ + " mov r5, lr \n" /* r5 = LR. */ + " stmia r1!, {r2-r5} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " cpsid i \n" + " bl vTaskSwitchContext \n" + " cpsie i \n" + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " movs r3, #1 \n" /* r3 = 1. */ + " bics r2, r3 \n" /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r5} \n" /* Read first set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write first set of RBAR/RLAR registers. */ + " movs r3, #5 \n" /* r3 = 5. */ + " str r3, [r1] \n" /* Program RNR = 5. */ + " ldmia r0!, {r4-r5} \n" /* Read second set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write second set of RBAR/RLAR registers. */ + " movs r3, #6 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 6. */ + " ldmia r0!, {r4-r5} \n" /* Read third set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write third set of RBAR/RLAR registers. */ + " movs r3, #7 \n" /* r3 = 6. */ + " str r3, [r1] \n" /* Program RNR = 7. */ + " ldmia r0!, {r4-r5} \n" /* Read fourth set of RBAR/RLAR registers from TCB. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " stmia r2!, {r4-r5} \n" /* Write fourth set of RBAR/RLAR registers. */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. 
*/ + " movs r3, #1 \n" /* r3 = 1. */ + " orrs r2, r3 \n" /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " subs r1, #16 \n" + " ldmia r1!, {r2-r5} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */ + " subs r1, #16 \n" + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " mov lr, r5 \n" + " \n" + " restore_general_regs: \n" + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy half of the the hardware saved context on the task stack. */ + " ldmia r1!, {r4-r7} \n" /* r4-r7 contain rest half of the hardware saved context. */ + " stmia r2!, {r4-r7} \n" /* Copy rest half of the the hardware saved context on the task stack. */ + " subs r1, #48 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r8-r11. */ + " mov r8, r4 \n" /* r8 = r4. */ + " mov r9, r5 \n" /* r9 = r5. */ + " mov r10, r6 \n" /* r10 = r6. */ + " mov r11, r7 \n" /* r11 = r7. */ + " subs r1, #32 \n" + " ldmia r1!, {r4-r7} \n" /* Restore r4-r7. */ + " subs r1, #16 \n" + " \n" + " restore_context_done: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -241,30 +414,16 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " mrs r0, psp \n"/* Read PSP in r0. */ " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - " subs r0, r0, #44 \n"/* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r0, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r2, control \n"/* r2 = CONTROL. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r0!, {r1-r7} \n"/* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. */ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ - #else /* configENABLE_MPU */ - " subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */ - " str r0, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r2, psplim \n"/* r2 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */ - " mov r4, r8 \n"/* r4 = r8. */ - " mov r5, r9 \n"/* r5 = r9. */ - " mov r6, r10 \n"/* r6 = r10. */ - " mov r7, r11 \n"/* r7 = r11. */ - " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. 
*/ - #endif /* configENABLE_MPU */ + " subs r0, r0, #40 \n"/* Make space for PSPLIM, LR and the remaining registers on the stack. */ + " str r0, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r0!, {r2-r7} \n"/* Store on the stack - PSPLIM, LR and low registers that are not automatically saved. */ + " mov r4, r8 \n"/* r4 = r8. */ + " mov r5, r9 \n"/* r5 = r9. */ + " mov r6, r10 \n"/* r6 = r10. */ + " mov r7, r11 \n"/* r7 = r11. */ + " stmia r0!, {r4-r7} \n"/* Store the high registers that are not saved automatically. */ " \n" " cpsid i \n" " bl vTaskSwitchContext \n" @@ -274,88 +433,76 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " bics r3, r4 \n"/* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " ldmia r1!, {r5,r6} \n"/* Read first set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write first set of RBAR/RLAR registers. */ - " movs r4, #5 \n"/* r4 = 5. */ - " str r4, [r2] \n"/* Program RNR = 5. */ - " ldmia r1!, {r5,r6} \n"/* Read second set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write second set of RBAR/RLAR registers. */ - " movs r4, #6 \n"/* r4 = 6. */ - " str r4, [r2] \n"/* Program RNR = 6. */ - " ldmia r1!, {r5,r6} \n"/* Read third set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write third set of RBAR/RLAR registers. */ - " movs r4, #7 \n"/* r4 = 7. */ - " str r4, [r2] \n"/* Program RNR = 7. */ - " ldmia r1!, {r5,r6} \n"/* Read fourth set of RBAR/RLAR from TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " stmia r3!, {r5,r6} \n"/* Write fourth set of RBAR/RLAR registers. */ - " \n" - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " movs r4, #1 \n"/* r4 = 1. */ - " orrs r3, r4 \n"/* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " adds r0, r0, #28 \n"/* Move to the high registers. */ - " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */ - " mov r8, r4 \n"/* r8 = r4. */ - " mov r9, r5 \n"/* r9 = r5. */ - " mov r10, r6 \n"/* r10 = r6. */ - " mov r11, r7 \n"/* r11 = r7. 
*/ - " msr psp, r0 \n"/* Remember the new top of stack for the task. */ - " subs r0, r0, #44 \n"/* Move to the starting of the saved context. */ - " ldmia r0!, {r1-r7} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ - " bx r3 \n" - #else /* configENABLE_MPU */ - " adds r0, r0, #24 \n"/* Move to the high registers. */ - " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */ - " mov r8, r4 \n"/* r8 = r4. */ - " mov r9, r5 \n"/* r9 = r5. */ - " mov r10, r6 \n"/* r10 = r6. */ - " mov r11, r7 \n"/* r11 = r7. */ - " msr psp, r0 \n"/* Remember the new top of stack for the task. */ - " subs r0, r0, #40 \n"/* Move to the starting of the saved context. */ - " ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */ - " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ - " bx r3 \n" - #endif /* configENABLE_MPU */ + " adds r0, r0, #24 \n"/* Move to the high registers. */ + " ldmia r0!, {r4-r7} \n"/* Restore the high registers that are not automatically restored. */ + " mov r8, r4 \n"/* r8 = r4. */ + " mov r9, r5 \n"/* r9 = r5. */ + " mov r10, r6 \n"/* r10 = r6. */ + " mov r11, r7 \n"/* r11 = r7. */ + " msr psp, r0 \n"/* Remember the new top of stack for the task. */ + " subs r0, r0, #40 \n"/* Move to the starting of the saved context. */ + " ldmia r0!, {r2-r7} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ + " bx r3 \n" " \n" " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "movs r0, #4 \n" + "mov r1, lr \n" + "tst r0, r1 \n" + "beq stack_on_msp \n" + "stack_on_psp: \n" + " mrs r0, psp \n" + " b route_svc \n" + "stack_on_msp: \n" + " mrs r0, msp \n" + " b route_svc \n" + " \n" + "route_svc: \n" + " ldr r2, [r0, #24] \n" + " subs r2, #2 \n" + " ldrb r3, [r2, #0] \n" + " cmp r3, %0 \n" + " beq system_call_enter \n" + " cmp r3, %1 \n" + " beq system_call_enter_1 \n" + " cmp r3, %2 \n" + " beq system_call_exit \n" + " b vPortSVCHandler_C \n" + " \n" + "system_call_enter: \n" + " b vSystemCallEnter \n" + "system_call_enter_1: \n" + " b vSystemCallEnter_1 \n" + "system_call_exit: \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. 
*/ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "r3", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -378,4 +525,6 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM33/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
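/* ---------------------------------------------------------------------------
 * Editor's note (not part of the patch): every naked MPU_* wrapper in this
 * file repeats the same dispatch pattern, so a C-level sketch of that pattern
 * is given below as a reading aid. ulGetControlRegister() and
 * MPU_xExampleCallImpl() are hypothetical stand-ins for the
 * "mrs r0, control / tst r0, #1" test and the real MPU_*Impl targets; the two
 * SVC instructions on the unprivileged path cannot be expressed in plain C,
 * so they are only described in the comments.
 * ------------------------------------------------------------------------- */
#include <stdint.h>

typedef long BaseType_t;                          /* Stand-in typedef so the sketch is self-contained. */

extern uint32_t ulGetControlRegister( void );     /* Hypothetical: models "mrs r0, control". */
extern BaseType_t MPU_xExampleCallImpl( void );   /* Hypothetical: models an MPU_*Impl target. */

BaseType_t MPU_xExampleCall( void )
{
    if( ( ulGetControlRegister() & 1U ) == 0U )
    {
        /* CONTROL bit 0 clear - the caller is already privileged, so the real
         * wrappers tail-branch ("b MPU_*Impl") straight to the implementation. */
        return MPU_xExampleCallImpl();
    }
    else
    {
        /* Unprivileged caller - the real wrappers execute
         *     svc portSVC_SYSTEM_CALL_ENTER   (or _ENTER_1 for 5-parameter calls)
         *     bl  MPU_*Impl
         *     svc portSVC_SYSTEM_CALL_EXIT
         * so the implementation runs privileged on the system call stack and
         * privilege is dropped again before control returns to the task. */
        return MPU_xExampleCallImpl();
    }
}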
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
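/* ---------------------------------------------------------------------------
 * Editor's note (not part of the patch): the "svc %0" / "svc %1" pairs used by
 * these wrappers are decoded by the port's SVC_Handler earlier in this patch,
 * which reads the stacked return address and then the byte two bytes behind it
 * (the immediate encoded in the SVC instruction) to decide whether to branch
 * to vSystemCallEnter, vSystemCallEnter_1, vSystemCallExit or
 * vPortSVCHandler_C. A hedged C equivalent of that decode step is sketched
 * below; pulCallerStack is an assumed pointer to the exception stack frame.
 * ------------------------------------------------------------------------- */
#include <stdint.h>

uint8_t ucDecodeSvcNumber( const uint32_t * pulCallerStack )
{
    /* The hardware-stacked frame holds r0, r1, r2, r3, r12, lr, pc, xpsr, so
     * the return address sits at byte offset 24 (index 6) - this mirrors the
     * "ldr r2, [r0, #24]" in the handler.  The SVC immediate is the low byte
     * of the 16-bit SVC instruction, i.e. the byte at (return address - 2). */
    uint32_t ulReturnAddress = pulCallerStack[ 6 ];

    return *( ( const uint8_t * ) ( uintptr_t ) ( ulReturnAddress - 2U ) );
}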
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
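
Each of the MPU_* wrappers above (and below) implements the same privilege gate in naked assembly: read CONTROL, test the nPRIV bit, branch straight to the privileged implementation when the caller is already privileged, and otherwise bracket the call between the two SVCs that enter and exit a system call. The C-style sketch below is illustrative only and is not part of the patch -- the real wrappers must remain naked assembly so that the caller's registers and stack reach vSystemCallEnter()/vSystemCallExit() untouched. MPU_xExampleCall, MPU_xExampleCallImpl and the two helper macros are hypothetical placeholders; __get_CONTROL() is the standard CMSIS intrinsic, and the SVC numbers are the ones used throughout the patch.

    /* Hypothetical helpers that emit the same SVC instructions the wrappers use. */
    #define exampleSYSTEM_CALL_ENTER()    __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_ENTER ) : "memory" )
    #define exampleSYSTEM_CALL_EXIT()     __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" )

    /* Placeholder for the privileged implementation the wrapper forwards to. */
    extern BaseType_t MPU_xExampleCallImpl( void * pvParam );

    BaseType_t MPU_xExampleCall( void * pvParam ) /* Illustrative sketch, not a real wrapper. */
    {
        BaseType_t xReturn;

        if( ( __get_CONTROL() & 0x1UL ) == 0UL )
        {
            /* nPRIV is clear - the caller is already privileged, so call the
             * implementation directly ("b MPU_xExampleCallImpl" in the assembly). */
            xReturn = MPU_xExampleCallImpl( pvParam );
        }
        else
        {
            /* The caller is unprivileged. The first SVC lets vSystemCallEnter()
             * switch to the per-task system call stack and raise privilege, the
             * implementation then runs privileged, and the second SVC lets
             * vSystemCallExit() restore the task stack and drop privilege. */
            exampleSYSTEM_CALL_ENTER();
            xReturn = MPU_xExampleCallImpl( pvParam );
            exampleSYSTEM_CALL_EXIT();
        }

        return xReturn;
    }

Two variations on this template appear in the patch: wrappers whose call takes a fifth, stacked parameter (for example MPU_xEventGroupWaitBits and MPU_xTimerGenericCommand) raise portSVC_SYSTEM_CALL_ENTER_1 so that vSystemCallEnter_1() also copies the stacked argument, and MPU_xTimerGenericCommand additionally checks IPSR so that calls made from an interrupt always take the privileged path.
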
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM33/non_secure/port.c b/portable/GCC/ARM_CM33/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM33/non_secure/port.c +++ b/portable/GCC/ARM_CM33/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. 
*/ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM33/non_secure/portasm.c b/portable/GCC/ARM_CM33/non_secure/portasm.c index 9f9b2e68d39..f7ec7d9c072 100644 --- a/portable/GCC/ARM_CM33/non_secure/portasm.c +++ b/portable/GCC/ARM_CM33/non_secure/portasm.c @@ -40,95 +40,120 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( - " .syntax unified \n" - " \n" - " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ - " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ - " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. 
*/ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. 
*/ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xSecureContextConst2: .word xSecureContext \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - " ldr r5, xSecureContextConst2 \n" - " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " msr control, r3 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r4 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ - " ldr r4, xSecureContextConst2 \n" - " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, [r2] \n" /* Read pxCurrentTCB. */ + " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" + " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n" /* Set this task's PSPLIM value. */ + " movs r1, #2 \n" /* r1 = 2. */ + " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n" /* Discard everything up to r0. */ + " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n" /* Finally, branch to EXC_RETURN. 
*/ " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" "xSecureContextConst2: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -236,6 +261,160 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */ + " \n" + " cbz r0, save_ns_context \n" /* No secure context to save. */ + " save_s_context: \n" + " push {r0-r2, lr} \n" + " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r2, lr} \n" + " \n" + " save_ns_context: \n" + " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */ + " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " save_general_regs: \n" + " mrs r3, psp \n" + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r2!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psp \n" /* r3 = PSP. */ + " mrs r4, psplim \n" /* r4 = PSPLIM. */ + " mrs r5, control \n" /* r5 = CONTROL. */ + " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. 
*/ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " cbz r0, restore_ns_context \n" /* No secure context to restore. */ + " \n" + " restore_s_context: \n" + " push {r1-r3, lr} \n" + " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r1-r3, lr} \n" + " \n" + " restore_ns_context: \n" + " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */ + " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " restore_general_regs: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. 
*/ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xSecureContextConst: .word xSecureContext \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -260,20 +439,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " b select_next_task \n" " \n" " save_ns_context: \n" @@ -284,26 +454,14 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " it eq \n" " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " subs r2, r2, #16 \n"/* r2 = r2 - 16. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. 
*/ - #else /* configENABLE_MPU */ - " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " \n" " select_next_task: \n" " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ @@ -318,83 +476,22 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r3] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r3] \n"/* Program MAIR0. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r3] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r3] \n"/* Program RNR = 8. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r3] \n"/* Program RNR = 12. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. 
*/ - " str r4, [r3] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #else /* configENABLE_MPU */ - " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #endif /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. 
r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" " \n" " restore_ns_context: \n" " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ @@ -409,17 +506,60 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" "xSecureContextConst: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -437,6 +577,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ diff --git a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM33/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. 
- */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. 
*/ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
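
Each wrapper above is the same naked-assembly trampoline: push r0, read CONTROL, test the nPRIV bit, pop r0, and then either branch straight to the Impl function (privileged caller) or bracket the Impl call with the two system-call SVCs (unprivileged caller). A minimal C sketch of that control flow, using demo-prefixed placeholder names rather than real FreeRTOS identifiers, is:

#include <stdint.h>

#define demoSVC_SYSTEM_CALL_ENTER    1U   /* stand-in for portSVC_SYSTEM_CALL_ENTER */
#define demoSVC_SYSTEM_CALL_EXIT     2U   /* stand-in for portSVC_SYSTEM_CALL_EXIT */

/* Stand-ins for the hardware and SVC mechanisms used by the real assembly. */
static uint32_t ulDemoControl = 1U;                         /* CONTROL.nPRIV set => unprivileged. */
static uint32_t prvDemoReadControl( void ) { return ulDemoControl; }
static void prvDemoRaiseSvc( uint32_t ulNumber ) { ( void ) ulNumber; }
static long lDemoApiImpl( long lArg ) { return lArg + 1; }  /* stands in for MPU_<api>Impl. */

long lDemoApiWrapper( long lArg )                           /* stands in for MPU_<api>. */
{
    long lResult;

    if( ( prvDemoReadControl() & 1U ) == 0U )
    {
        /* Caller is already privileged: branch straight to the Impl. */
        lResult = lDemoApiImpl( lArg );
    }
    else
    {
        /* Unprivileged caller: SVC in (raise privilege, switch to the system
         * call stack), run the Impl, then SVC out to undo both. */
        prvDemoRaiseSvc( demoSVC_SYSTEM_CALL_ENTER );
        lResult = lDemoApiImpl( lArg );
        prvDemoRaiseSvc( demoSVC_SYSTEM_CALL_EXIT );
    }

    return lResult;
}

The push/pop of r0 in the real wrappers exists only so the first argument survives the CONTROL test; the C sketch has no equivalent because the compiler manages registers itself.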
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
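
Two variations are worth noting in the wrappers above. First, wrappers whose target takes a fifth argument (MPU_xTaskGenericNotify, MPU_xTaskGenericNotifyWait, MPU_xEventGroupWaitBits and MPU_xTimerGenericCommand) enter through portSVC_SYSTEM_CALL_ENTER_1 rather than portSVC_SYSTEM_CALL_ENTER, because the fifth argument travels on the caller's stack and the enter path must copy it onto the system call stack as well (handled by vSystemCallEnter_1 in port.c below). Second, MPU_xTimerGenericCommand reads IPSR before CONTROL so that a call made from an ISR, where the core always executes privileged, skips the SVC pair entirely. A rough C rendering of that extra check, again with demo-prefixed placeholders:

#include <stdint.h>

/* Stubs standing in for single MRS instructions in the real port. */
static uint32_t prvDemoReadIpsr( void )    { return 0U; }   /* 0 in thread mode, else the active exception number. */
static uint32_t prvDemoReadControl( void ) { return 1U; }   /* Bit 0 (nPRIV) set means unprivileged thread mode. */

int xDemoMustUseSvcPath( void )
{
    /* Handler mode executes privileged regardless of CONTROL, so an ISR
     * caller goes straight to the Impl function. */
    if( prvDemoReadIpsr() != 0U )
    {
        return 0;
    }

    /* Thread mode: only an unprivileged caller needs the SVC enter/exit detour. */
    return ( ( prvDemoReadControl() & 1U ) != 0U ) ? 1 : 0;
}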
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. 
*/ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c index a78529d04d9..504b6bf3be3 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portasm.c @@ -40,6 +40,88 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. 
*/ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -50,80 +132,23 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. 
*/ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " msr control, r2 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r2 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r2 \n"/* Finally, branch to EXC_RETURN. */ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -231,6 +256,129 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */ + " mrs r2, psp \n" /* r2 = PSP. */ + " \n" + " save_general_regs: \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. 
*/ + " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r1!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psplim \n" /* r3 = PSPLIM. */ + " mrs r4, control \n" /* r4 = CONTROL. */ + " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. 
*/ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -238,21 +386,16 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .syntax unified \n" " \n" " mrs r0, psp \n"/* Read PSP in r0. */ + " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r2, control \n"/* r2 = CONTROL. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ - #else /* configENABLE_MPU */ - " mrs r2, psplim \n"/* r2 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ - #endif /* configENABLE_MPU */ + " \n" + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ " \n" " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ @@ -270,52 +413,7 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. 
*/ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ - #else /* configENABLE_MPU */ - " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ - #endif /* configENABLE_MPU */ + " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -323,28 +421,66 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ " \n" - #if ( configENABLE_MPU == 1 ) - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ - #else /* configENABLE_MPU */ - " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ - #endif /* configENABLE_MPU */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ " msr psp, r0 \n"/* Remember the new top of stack for the task. 
*/ " bx r3 \n" " \n" " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -362,4 +498,6 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. 
+ */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
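Every MPU_* wrapper in this file repeats the same trampoline: read CONTROL, test bit 0 (nPRIV), branch straight to the matching *Impl function when the caller is already privileged, and otherwise raise SVC portSVC_SYSTEM_CALL_ENTER, call the *Impl on the system-call stack, then raise portSVC_SYSTEM_CALL_EXIT to switch back to the task stack. The following is only a rough C sketch of that control flow for one wrapper: MPU_uxTaskPriorityGet_Sketch, prvReadControl() and RAISE_SVC() are names invented for the sketch, and the real wrappers must stay naked assembly because the ENTER/EXIT SVCs change the active stack underneath the running function.

#include "FreeRTOS.h"
#include "task.h"

/* The portSVC_* constants come from the port headers; MPU_uxTaskPriorityGetImpl()
 * is the implementation the wrapper above forwards to. */
extern UBaseType_t MPU_uxTaskPriorityGetImpl( TaskHandle_t xTask );

#define RAISE_SVC( ucNumber )    __asm volatile ( "svc %0" : : "i" ( ucNumber ) : "memory" )

static inline uint32_t prvReadControl( void )
{
    uint32_t ulControl;

    /* Same "mrs r0, control" read that each naked wrapper performs. */
    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

    return ulControl;
}

UBaseType_t MPU_uxTaskPriorityGet_Sketch( TaskHandle_t xTask )
{
    UBaseType_t uxReturn;

    if( ( prvReadControl() & 1UL ) == 0UL )   /* CONTROL.nPRIV clear, i.e. privileged caller. */
    {
        uxReturn = MPU_uxTaskPriorityGetImpl( xTask );   /* "b MPU_uxTaskPriorityGetImpl". */
    }
    else
    {
        RAISE_SVC( portSVC_SYSTEM_CALL_ENTER );          /* "svc %0": handler moves execution to the system-call stack. */
        uxReturn = MPU_uxTaskPriorityGetImpl( xTask );   /* "bl MPU_uxTaskPriorityGetImpl". */
        RAISE_SVC( portSVC_SYSTEM_CALL_EXIT );           /* "svc %1": handler restores the task stack. */
    }

    return uxReturn;
}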
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
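From application code none of this is visible: when the MPU wrappers are enabled, mpu_wrappers.h maps the public API names onto these MPU_ entry points, so an unprivileged task keeps calling the normal API. A hedged usage sketch follows; xQueue, ulMessage and prvConsumerTask are placeholders assumed to be created elsewhere, for example by privileged start-up code.

#include "FreeRTOS.h"
#include "queue.h"
#include "task.h"

static void prvConsumerTask( void * pvParameters )
{
    QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;   /* Queue created by privileged start-up code. */
    uint32_t ulMessage;

    for( ; ; )
    {
        /* Resolves to MPU_xQueueReceive(); the trampoline decides at run
         * time whether the SVC enter/exit path is needed. */
        if( xQueueReceive( xQueue, &ulMessage, pdMS_TO_TICKS( 100U ) ) == pdPASS )
        {
            /* Process ulMessage. */
        }
    }
}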
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
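Note the two different entry SVC numbers used above: most wrappers raise portSVC_SYSTEM_CALL_ENTER, while the APIs that take five parameters (xTaskGenericNotify, xTaskGenericNotifyWait, xTimerGenericCommand and xEventGroupWaitBits) raise portSVC_SYSTEM_CALL_ENTER_1, matching the vSystemCallEnter()/vSystemCallEnter_1() pair declared in the port.c hunk later in this patch, presumably because under AAPCS the fifth argument is passed on the stack and must be moved to the system-call stack as well. The fragment below is only a sketch of how a handler could fan out on that number; prvDispatchSystemCallSVC() is a hypothetical name, the extern prototypes simply mirror the declarations that appear later in this patch, and the real dispatch lives in vPortSVCHandler_C.

#include "FreeRTOS.h"

/* Prototypes mirroring the declarations added to port.c later in this patch;
 * repeated here only to keep the sketch self-contained. */
extern void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR );
extern void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR );
extern void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR );

/* Hypothetical helper: shows only how the three SVC numbers relate. */
static void prvDispatchSystemCallSVC( uint8_t ucSVCNumber,
                                      uint32_t * pulCallerStack,
                                      uint32_t ulLR )
{
    switch( ucSVCNumber )
    {
        case portSVC_SYSTEM_CALL_ENTER:      /* System calls with up to 4 register arguments. */
            vSystemCallEnter( pulCallerStack, ulLR );
            break;

        case portSVC_SYSTEM_CALL_ENTER_1:    /* System calls with 5 arguments. */
            vSystemCallEnter_1( pulCallerStack, ulLR );
            break;

        case portSVC_SYSTEM_CALL_EXIT:       /* Raised after the *Impl call to return to the task stack. */
            vSystemCallExit( pulCallerStack, ulLR );
            break;

        default:
            break;
    }
}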
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM35P/non_secure/port.c b/portable/GCC/ARM_CM35P/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM35P/non_secure/port.c +++ b/portable/GCC/ARM_CM35P/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. 
*/ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
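portADD_UINT32_WILL_OVERFLOW exists so that the buffer end address, ( uint32_t ) pvBuffer + ulBufferLength - 1, can be formed without wrapping past 0xFFFFFFFF; a wrapped end address would otherwise make an enormous buffer look like a small, valid one. A minimal stand-alone sketch of that guard, using local copies of the macros so it compiles on its own (the prv name is hypothetical):

#include <stdint.h>

#define UINT32_MAX_VALUE                    ( ~( ( uint32_t ) 0 ) )
#define ADD_UINT32_WILL_OVERFLOW( a, b )    ( ( a ) > ( UINT32_MAX_VALUE - ( b ) ) )

/* Returns 1 only when [ ulStart, ulStart + ulLength - 1 ] is non-empty and
 * does not wrap around the 32-bit address space. */
static int prvRangeIsRepresentable( uint32_t ulStart,
                                    uint32_t ulLength )
{
    int xIsRepresentable = 0;

    if( ( ulLength != 0UL ) &&
        ( !ADD_UINT32_WILL_OVERFLOW( ulStart, ulLength - 1UL ) ) )
    {
        xIsRepresentable = 1;
    }

    return xIsRepresentable;
}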
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
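vPortSVCHandler_C recovers the SVC number from the instruction stream rather than from a register: the stacked PC is the return address, i.e. the address of the instruction following the 16-bit Thumb "svc #imm8", and in the little-endian encoding the immediate is the byte two bytes behind that address. A stand-alone rendering of that read (the helper name is illustrative):

#include <stdint.h>

/* ulStackedPC is pulCallerStackAddress[ portOFFSET_TO_PC ] in the handler;
 * the byte at ( PC - 2 ) is the imm8 operand of the "svc" that trapped. */
static uint8_t prvExtractSvcNumber( uint32_t ulStackedPC )
{
    const uint8_t * pucInstruction = ( const uint8_t * ) ulStackedPC;

    return pucInstruction[ -2 ];
}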
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
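The number of words vSystemCallEnter() copies depends on whether the hardware stacked an extended (FPU) frame or a basic one, which is recorded in bit[4] of EXC_RETURN, with 0 meaning extended. The sizes used above are 26 words (r0-r3, r12, LR, PC, xPSR, s0-s15, FPSCR and a reserved word) against 8 words for the integer-only frame. A sketch of that decision, with a local mask standing in for portEXC_RETURN_STACK_FRAME_TYPE_MASK:

#include <stdint.h>

#define EXC_RETURN_FRAME_TYPE_MASK    ( 1UL << 4UL )

static uint32_t prvHardwareFrameSizeInWords( uint32_t ulExcReturn )
{
    uint32_t ulFrameSizeInWords;

    if( ( ulExcReturn & EXC_RETURN_FRAME_TYPE_MASK ) == 0UL )
    {
        ulFrameSizeInWords = 26UL;   /* Extended frame - FP context stacked. */
    }
    else
    {
        ulFrameSizeInWords = 8UL;    /* Basic frame - integer registers only. */
    }

    return ulFrameSizeInWords;
}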
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
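Bit[9] of the stacked xPSR records whether the processor inserted a single padding word to keep the exception frame double word aligned. vSystemCallEnter() latches that bit into ulTaskFlags so the frame can be rebuilt with the same alignment on exit, and clears it in the copy because the system call stack is already aligned. A small sketch of the test, with a local mask mirroring portPSR_STACK_PADDING_MASK:

#include <stdint.h>

#define PSR_STACK_PADDING_MASK    ( 1UL << 9UL )

/* Returns 1 when the hardware added an alignment padding word before
 * stacking the exception frame. */
static int prvFrameWasPadded( uint32_t ulStackedXPSR )
{
    return ( ( ulStackedXPSR & PSR_STACK_PADDING_MASK ) != 0UL ) ? 1 : 0;
}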
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
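vSystemCallEnter_1() exists because the AAPCS passes only the first four arguments in r0-r3; a fifth argument sits on the caller's stack immediately after the hardware frame, and one word further along when the padding word described above is present. The sketch below shows where that word is read from (the helper and its parameters are illustrative):

#include <stdint.h>

/* pulTaskStack points at the hardware stacked frame, ulFrameSizeInWords is
 * 8 or 26, and xFrameWasPadded reflects bit[9] of the stacked xPSR. */
static uint32_t prvReadFifthArgument( const uint32_t * pulTaskStack,
                                      uint32_t ulFrameSizeInWords,
                                      int xFrameWasPadded )
{
    uint32_t ulOffset = ulFrameSizeInWords;

    if( xFrameWasPadded != 0 )
    {
        ulOffset += 1UL;   /* Skip the alignment padding word. */
    }

    return pulTaskStack[ ulOffset ];
}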
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
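Both the enter and the exit paths refuse to act unless the SVC was raised from inside the __syscalls_flash region laid out by the linker script, which prevents an unprivileged task from forging a system-call SVC from arbitrary code. The gate, reduced to a stand-alone sketch that assumes the same linker symbols are available:

#include <stdint.h>

extern uint32_t __syscalls_flash_start__[];
extern uint32_t __syscalls_flash_end__[];

/* Returns 1 only when the stacked return address lies inside the section
 * that holds the FreeRTOS system call stubs. */
static int prvRaisedFromSystemCallSection( uint32_t ulStackedPC )
{
    return ( ( ulStackedPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
             ( ulStackedPC <= ( uint32_t ) __syscalls_flash_end__ ) ) ? 1 : 0;
}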
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
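With the v2 wrappers the task context is no longer spread over the task stack; pxPortInitialiseStack() seeds it inside xMPUSettings->ulContext[] in the store order shown above and returns a pointer just past the last entry, which the context switch code treats as the location to save to next. The enum below is only an illustrative index map for the configENABLE_TRUSTZONE build without FPU/MVE; other configurations insert the FP registers or drop xSecureContext.

/* Index map of xMPUSettings->ulContext[] for the TrustZone, no-FPU case.
 * Purely illustrative - the port uses a running index, not named constants. */
enum TaskContextIndex
{
    ctxR4 = 0, ctxR5, ctxR6, ctxR7, ctxR8, ctxR9, ctxR10, ctxR11,    /* Software saved. */
    ctxR0, ctxR1, ctxR2, ctxR3, ctxR12, ctxLR, ctxPC, ctxXPSR,       /* Image of the hardware frame. */
    ctxSecureContext, ctxPSP, ctxPSPLIM, ctxCONTROL, ctxEXC_RETURN   /* Special registers. */
};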
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
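The system call stack is carved out of a plain uint32_t buffer inside the TCB, so the initial top is rounded down and the limit rounded up to the port's 8-byte alignment, as both the AAPCS and the PSPLIM check expect. The same arithmetic as a stand-alone sketch with local names:

#include <stdint.h>

#define BYTE_ALIGNMENT         8UL                        /* Mirrors portBYTE_ALIGNMENT. */
#define BYTE_ALIGNMENT_MASK    ( BYTE_ALIGNMENT - 1UL )   /* Mirrors portBYTE_ALIGNMENT_MASK. */

/* Top of a descending stack: round down so pushes stay inside the buffer. */
static uint32_t * prvAlignStackTopDown( uint32_t * pulTop )
{
    return ( uint32_t * ) ( ( ( uint32_t ) pulTop ) & ( uint32_t ) ~BYTE_ALIGNMENT_MASK );
}

/* Stack limit (lowest usable address): round up for the same reason. */
static uint32_t * prvAlignStackLimitUp( uint32_t * pulLimit )
{
    return ( uint32_t * ) ( ( ( ( uint32_t ) pulLimit ) + BYTE_ALIGNMENT_MASK ) &
                            ( uint32_t ) ~BYTE_ALIGNMENT_MASK );
}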
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
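xPortIsAuthorizedToAccessBuffer() is what allows the kernel-side system call implementations to vet a task-supplied pointer against the calling task's MPU regions before touching it. A hypothetical usage sketch follows; the copy helper and its name are not part of the patch, tskMPU_WRITE_PERMISSION comes from task.h, and the function's prototype is assumed to be in scope through the kernel headers.

#include <string.h>
#include "FreeRTOS.h"
#include "task.h"

/* Hypothetical kernel-side helper: write to the caller's buffer only if the
 * calling task's MPU regions grant write access to every byte of it. */
static BaseType_t prvCopyToCallerBuffer( void * pvCallerBuffer,
                                         const void * pvSource,
                                         uint32_t ulLength )
{
    BaseType_t xResult = pdFAIL;

    if( xPortIsAuthorizedToAccessBuffer( pvCallerBuffer, ulLength, tskMPU_WRITE_PERMISSION ) == pdTRUE )
    {
        ( void ) memcpy( pvCallerBuffer, pvSource, ulLength );
        xResult = pdPASS;
    }

    return xResult;
}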
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM35P/non_secure/portasm.c b/portable/GCC/ARM_CM35P/non_secure/portasm.c index 9f9b2e68d39..f7ec7d9c072 100644 --- a/portable/GCC/ARM_CM35P/non_secure/portasm.c +++ b/portable/GCC/ARM_CM35P/non_secure/portasm.c @@ -40,95 +40,120 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( - " .syntax unified \n" - " \n" - " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ - " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ - " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. 
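The loop above grants access only when a single enabled region contains both the first and the last byte of the buffer and its decoded permissions are a superset of the requested access. That condition, condensed into a stand-alone sketch with illustrative names:

#include <stdint.h>

/* ulRegionStart/ulRegionEnd come from the RBAR/RLAR decode macros,
 * ulRegionPermissions from prvGetRegionAccessPermissions(), and
 * ulRequestedAccess from the caller. */
static int prvRegionCoversBuffer( uint32_t ulRegionStart,
                                  uint32_t ulRegionEnd,
                                  uint32_t ulBufferStart,
                                  uint32_t ulBufferEnd,
                                  uint32_t ulRegionPermissions,
                                  uint32_t ulRequestedAccess )
{
    int xCovered = 0;

    if( ( ulBufferStart >= ulRegionStart ) && ( ulBufferStart <= ulRegionEnd ) &&
        ( ulBufferEnd >= ulRegionStart ) && ( ulBufferEnd <= ulRegionEnd ) &&
        ( ( ulRegionPermissions & ulRequestedAccess ) == ulRequestedAccess ) )
    {
        xCovered = 1;
    }

    return xCovered;
}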
*/ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. 
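The "movs r3, #4 / str r3, [RNR]" followed by an 8-register stmia works because the ARMv8-M MPU maps RBAR and RLAR plus three alias pairs at consecutive word addresses starting at 0xE000ED9C, so one 8-word store programs four regions beginning at the number written to RNR. A C rendering of the same idea; the register addresses are architectural, the function itself is illustrative:

#include <stdint.h>

#define MPU_RNR     ( *( ( volatile uint32_t * ) 0xE000ED98UL ) )
#define MPU_RBAR    ( ( volatile uint32_t * ) 0xE000ED9CUL )   /* RBAR, RLAR and three alias pairs follow. */

/* pulRbarRlarPairs holds { RBAR0, RLAR0, RBAR1, RLAR1, RBAR2, RLAR2, RBAR3, RLAR3 }. */
static void prvProgramFourRegions( uint32_t ulFirstRegionNumber,
                                   const uint32_t * pulRbarRlarPairs )
{
    uint32_t i;

    MPU_RNR = ulFirstRegionNumber;

    for( i = 0; i < 8UL; i++ )
    {
        MPU_RBAR[ i ] = pulRbarRlarPairs[ i ];
    }
}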
*/ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xSecureContextConst2: .word xSecureContext \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - " ldr r5, xSecureContextConst2 \n" - " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " msr control, r3 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r4 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ - " ldr r4, xSecureContextConst2 \n" - " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, [r2] \n" /* Read pxCurrentTCB. */ + " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" + " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n" /* Set this task's PSPLIM value. */ + " movs r1, #2 \n" /* r1 = 2. */ + " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n" /* Discard everything up to r0. */ + " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n" /* Finally, branch to EXC_RETURN. 
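In the non-MPU path the first task is started by writing 2 to CONTROL, that is, setting SPSEL so thread mode uses the PSP while leaving nPRIV clear so the task remains privileged; the MPU path instead restores a per-task CONTROL value from the saved context. A small sketch of that bit layout with local names:

#include <stdint.h>

#define CONTROL_NPRIV    ( 1UL << 0UL )   /* 1 = unprivileged thread mode. */
#define CONTROL_SPSEL    ( 1UL << 1UL )   /* 1 = thread mode uses the PSP. */

static inline void prvUsePspAndStayPrivileged( void )
{
    __asm volatile
    (
        " msr control, %0 \n"
        " isb             \n"
        :: "r" ( CONTROL_SPSEL ) : "memory"
    );
}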
*/ " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" "xSecureContextConst2: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -236,6 +261,160 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */ + " \n" + " cbz r0, save_ns_context \n" /* No secure context to save. */ + " save_s_context: \n" + " push {r0-r2, lr} \n" + " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r2, lr} \n" + " \n" + " save_ns_context: \n" + " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */ + " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " save_general_regs: \n" + " mrs r3, psp \n" + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r2!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psp \n" /* r3 = PSP. */ + " mrs r4, psplim \n" /* r4 = PSPLIM. */ + " mrs r5, control \n" /* r5 = CONTROL. */ + " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. 
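The "lsls rX, rX, #25 / bmi" pairs are a branch-friendly test of bit[6] of EXC_RETURN: shifting left by 25 moves bit[6] into the sign bit, so a negative result means the secure stack holds the exception frame and the non-secure register save or restore can be skipped. The equivalent test written out in C (helper name is illustrative):

#include <stdint.h>

#define EXC_RETURN_SECURE_STACK_MASK    ( 1UL << 6UL )

/* Returns 1 when the exception frame was stacked on the secure stack. */
static inline int prvSecureStackWasUsed( uint32_t ulExcReturn )
{
    return ( ( ulExcReturn & EXC_RETURN_SECURE_STACK_MASK ) != 0UL ) ? 1 : 0;
}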
*/ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " cbz r0, restore_ns_context \n" /* No secure context to restore. */ + " \n" + " restore_s_context: \n" + " push {r1-r3, lr} \n" + " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r1-r3, lr} \n" + " \n" + " restore_ns_context: \n" + " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */ + " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " restore_general_regs: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. 
*/ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xSecureContextConst: .word xSecureContext \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -260,20 +439,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " b select_next_task \n" " \n" " save_ns_context: \n" @@ -284,26 +454,14 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " it eq \n" " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " subs r2, r2, #16 \n"/* r2 = r2 - 16. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. 
*/ - #else /* configENABLE_MPU */ - " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " \n" " select_next_task: \n" " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ @@ -318,83 +476,22 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r3] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r3] \n"/* Program MAIR0. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r3] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r3] \n"/* Program RNR = 8. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r3] \n"/* Program RNR = 12. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. 
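The select_next_task label masks interrupts by raising BASEPRI to configMAX_SYSCALL_INTERRUPT_PRIORITY, so no kernel-aware interrupt can re-enter the scheduler while vTaskSwitchContext() runs, and then writes 0 to BASEPRI once the next TCB has been chosen. The same pair of operations as plain helpers (names are illustrative):

#include <stdint.h>

static inline void prvRaiseBasepri( uint32_t ulNewBasepri )
{
    __asm volatile
    (
        " msr basepri, %0 \n"
        " dsb             \n"
        " isb             \n"
        :: "r" ( ulNewBasepri ) : "memory"
    );
}

static inline void prvClearBasepri( void )
{
    __asm volatile ( " msr basepri, %0 " :: "r" ( 0UL ) : "memory" );
}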
*/ - " str r4, [r3] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #else /* configENABLE_MPU */ - " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #endif /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. 
r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" " \n" " restore_ns_context: \n" " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ @@ -409,17 +506,60 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" "xSecureContextConst: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -437,6 +577,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ diff --git a/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM35P/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. 
- */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. 
*/ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
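Every wrapper in this translation unit, including the queue wrappers that follow, repeats one naked-function template: read CONTROL, test the nPRIV bit, and either tail-branch directly to the privileged implementation or bracket the call between the system-call-enter and system-call-exit SVCs (wrappers that pass five parameters, such as MPU_xTaskGenericNotify above, raise portSVC_SYSTEM_CALL_ENTER_1 instead of portSVC_SYSTEM_CALL_ENTER). The sketch below shows that template using the invented names MPU_xExampleCall and MPU_xExampleCallImpl, which are not kernel symbols, and assumes the headers this file already includes.

BaseType_t MPU_xExampleCall( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL;

BaseType_t MPU_xExampleCall( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
{
    __asm volatile
    (
        " .syntax unified               \n"
        " .extern MPU_xExampleCallImpl  \n" /* Privileged implementation, assumed to exist elsewhere. */
        "                               \n"
        " push {r0}                     \n" /* Preserve r0 while CONTROL is examined. */
        " mrs r0, control               \n" /* CONTROL bit 0 (nPRIV) is 1 when running unprivileged. */
        " tst r0, #1                    \n"
        " bne MPU_xExampleCall_Unpriv   \n"
        " MPU_xExampleCall_Priv:        \n"
        " pop {r0}                      \n"
        " b MPU_xExampleCallImpl        \n" /* Already privileged - branch straight to the implementation. */
        " MPU_xExampleCall_Unpriv:      \n"
        " pop {r0}                      \n"
        " svc %0                        \n" /* Routed to vSystemCallEnter() by SVC_Handler. */
        " bl MPU_xExampleCallImpl       \n"
        " svc %1                        \n" /* Routed to vSystemCallExit() by SVC_Handler. */
        " bx lr                         \n"
        "                               \n"
        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
    );
}

Because the function is naked, the compiler emits no prologue or epilogue, so the hand-written push/pop and the final bx lr are the only frame handling; the real wrappers above and below follow this shape, differing only in the symbol names and, for the five-parameter calls, the entry SVC number.
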
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
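The svc %0 / svc %1 pairs used by these wrappers are decoded by the SVC_Handler variant added earlier in this patch: it selects MSP or PSP from EXC_RETURN, reads the stacked return address at byte offset 24 of the exception frame, fetches the 8-bit SVC number from the two bytes preceding that address, and routes portSVC_SYSTEM_CALL_ENTER / portSVC_SYSTEM_CALL_ENTER_1 to vSystemCallEnter / vSystemCallEnter_1, portSVC_SYSTEM_CALL_EXIT to vSystemCallExit, and every other number to vPortSVCHandler_C. A rough C rendering of that decode is given below for illustration only; vExampleSvcDecode and its local names are invented here, the kernel performs this step in assembly, and the sketch assumes the includes already present in this file.

void vExampleSvcDecode( uint32_t * pulExceptionFrame )
{
    /* pulExceptionFrame[ 6 ] (byte offset 24) is the stacked PC, which points
     * just past the 2-byte Thumb SVC instruction; the instruction's low byte
     * is the SVC number. */
    uint8_t * pucReturnAddress = ( uint8_t * ) pulExceptionFrame[ 6 ];
    uint8_t ucSvcNumber = pucReturnAddress[ -2 ];

    switch( ucSvcNumber )
    {
        case portSVC_SYSTEM_CALL_ENTER:   /* System calls with up to 4 parameters. */
        case portSVC_SYSTEM_CALL_ENTER_1: /* System calls with 5 parameters. */
            /* Handed to vSystemCallEnter()/vSystemCallEnter_1(), presumably
             * using the xSYSTEM_CALL_STACK_INFO block added to xMPU_SETTINGS. */
            break;

        case portSVC_SYSTEM_CALL_EXIT:
            /* Handed to vSystemCallExit() to unwind the system call. */
            break;

        default:
            /* portSVC_START_SCHEDULER, portSVC_RAISE_PRIVILEGE and the rest
             * still go to vPortSVCHandler_C(). */
            break;
    }
}
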
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
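Every MPU_* wrapper in this file follows the same entry gate: read CONTROL, test the nPRIV bit, and either branch straight to the privileged implementation or enter it through the portSVC_SYSTEM_CALL_ENTER / portSVC_SYSTEM_CALL_EXIT pair. The plain-C sketch below is illustrative only -- ulControl, prvImpl, prvSvcEnter and prvSvcExit are hypothetical stand-ins, not part of the port -- and simply models the control flow that the naked assembly above encodes.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins used only to model the wrapper control flow. */
static uint32_t ulControl = 1UL;  /* Bit 0 (nPRIV) set => caller is unprivileged. */

static void prvImpl( void )     { printf( "MPU_...Impl executes here\n" ); }
static void prvSvcEnter( void ) { printf( "svc SYSTEM_CALL_ENTER: raise privilege, switch to system call stack\n" ); }
static void prvSvcExit( void )  { printf( "svc SYSTEM_CALL_EXIT: drop privilege, restore task stack\n" ); }

/* Mirrors each naked MPU_* wrapper above: 'mrs r0, control' / 'tst r0, #1'. */
static void prvWrapperGate( void )
{
    if( ( ulControl & 1UL ) == 0UL )
    {
        /* Privileged caller: 'b MPU_..._Impl' - no SVC round trip. */
        prvImpl();
    }
    else
    {
        /* Unprivileged caller: 'svc %0', 'bl MPU_..._Impl', 'svc %1', 'bx lr'. */
        prvSvcEnter();
        prvImpl();
        prvSvcExit();
    }
}

int main( void )
{
    prvWrapperGate();
    return 0;
}

MPU_xTimerGenericCommand is the one exception to this pattern: it also reads IPSR first, so a call made from interrupt context always takes the privileged path regardless of the CONTROL value.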
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. 
*/ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c index a78529d04d9..504b6bf3be3 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portasm.c @@ -40,6 +40,88 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. 
*/ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -50,80 +132,23 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. 
*/ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " msr control, r2 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r2 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r2 \n"/* Finally, branch to EXC_RETURN. */ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -231,6 +256,129 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */ + " mrs r2, psp \n" /* r2 = PSP. */ + " \n" + " save_general_regs: \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. 
*/ + " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r1!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psplim \n" /* r3 = PSPLIM. */ + " mrs r4, control \n" /* r4 = CONTROL. */ + " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. 
*/ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -238,21 +386,16 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .syntax unified \n" " \n" " mrs r0, psp \n"/* Read PSP in r0. */ + " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r2, control \n"/* r2 = CONTROL. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ - #else /* configENABLE_MPU */ - " mrs r2, psplim \n"/* r2 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ - #endif /* configENABLE_MPU */ + " \n" + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ " \n" " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ @@ -270,52 +413,7 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. 
*/ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ - #else /* configENABLE_MPU */ - " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ - #endif /* configENABLE_MPU */ + " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -323,28 +421,66 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ " \n" - #if ( configENABLE_MPU == 1 ) - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ - #else /* configENABLE_MPU */ - " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ - #endif /* configENABLE_MPU */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ " msr psp, r0 \n"/* Remember the new top of stack for the task. 
*/ " bx r3 \n" " \n" " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -362,4 +498,6 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM35P_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. 
+ */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..df9239a41d6 --- /dev/null +++ b/portable/GCC/ARM_CM3_MPU/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0} \n" + " b 
MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push 
{r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc 
%1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE 
MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
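Every MPU_* wrapper added in this file repeats the same dispatch pattern: read CONTROL, test the nPRIV bit, tail-branch straight to the corresponding MPU_*Impl function if the caller is already privileged, and otherwise enter the kernel through portSVC_SYSTEM_CALL_ENTER (or portSVC_SYSTEM_CALL_ENTER_1 for calls that take five parameters), run the implementation, and leave through portSVC_SYSTEM_CALL_EXIT. The sketch below is a plain-C rendering of that decision for orientation only; it is not part of the patch, the helper name prvCallingTaskIsPrivileged is invented for illustration, and the real wrappers must remain naked assembly so that r0-r3 still carry the caller's arguments when the Impl function is reached.

#include <stdint.h>

/* Hypothetical helper, for illustration only: mirrors the
 * "mrs r0, control / tst r0, #1" test at the top of every wrapper. */
static inline uint32_t prvCallingTaskIsPrivileged( void )
{
    uint32_t ulControl;

    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) :: "memory" );

    /* Bit 0 of CONTROL (nPRIV) is 1 while the processor runs unprivileged. */
    return ( ( ulControl & 1UL ) == 0UL ) ? 1UL : 0UL;
}

/* Pseudo-flow of, for example, MPU_xQueueGenericSend:
 *
 *   if( prvCallingTaskIsPrivileged() )
 *       b   MPU_xQueueGenericSendImpl     (plain tail-branch, no SVC needed)
 *   else
 *       svc portSVC_SYSTEM_CALL_ENTER     (SVC_Handler routes this to vSystemCallEnter)
 *       bl  MPU_xQueueGenericSendImpl     (run the real API on the privileged side)
 *       svc portSVC_SYSTEM_CALL_EXIT      (SVC_Handler routes this to vSystemCallExit)
 *       bx  lr                            (return the result to the caller)
 */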
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
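Two of the wrappers above deviate slightly from the basic pattern. Calls that pass a fifth parameter on the stack (MPU_xTimerGenericCommand and MPU_xEventGroupWaitBits) enter through portSVC_SYSTEM_CALL_ENTER_1, which directs the handler to copy that stacked argument across to the system call stack as well, and MPU_xTimerGenericCommand additionally reads IPSR so that a call made from an interrupt handler, which already executes privileged, bypasses the SVC sequence altogether. The sketch below shows only that decision logic, again using hypothetical helpers (prvReadIPSR(), prvReadControlRegister(), prvRaiseSVC(), MPU_xTimerGenericCommandSketch and the sketchSVC_* constants) in place of the mrs/svc instructions and the real port definitions.

#include <stdint.h>

/* Hypothetical stand-ins, as in the earlier sketch. */
#define sketchSVC_SYSTEM_CALL_ENTER_1    102U
#define sketchSVC_SYSTEM_CALL_EXIT       101U

extern uint32_t prvReadIPSR( void );               /* Stands in for "mrs r0, ipsr". */
extern uint32_t prvReadControlRegister( void );    /* Stands in for "mrs r0, control". */
extern void prvRaiseSVC( uint32_t ulSvcNumber );   /* Stands in for "svc %0". */
extern long MPU_xTimerGenericCommandImpl( void );  /* Stands in for the real Impl. */

long MPU_xTimerGenericCommandSketch( void )
{
    long lResult;

    /* A non-zero IPSR means the call was made from an interrupt handler,
     * which already executes privileged; a clear nPRIV bit means the task
     * itself is privileged.  Either way the SVC round trip is skipped. */
    if( ( prvReadIPSR() != 0UL ) || ( ( prvReadControlRegister() & 1UL ) == 0UL ) )
    {
        lResult = MPU_xTimerGenericCommandImpl();
    }
    else
    {
        /* ENTER_1 tells the SVC handler to copy the fifth, stack-passed
         * parameter onto the system call stack along with the exception
         * frame before raising privilege. */
        prvRaiseSVC( sketchSVC_SYSTEM_CALL_ENTER_1 );
        lResult = MPU_xTimerGenericCommandImpl();
        prvRaiseSVC( sketchSVC_SYSTEM_CALL_EXIT );
    }

    return lResult;
}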
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c index 1a3f9473178..d29b31d74f5 100755 --- a/portable/GCC/ARM_CM3_MPU/port.c +++ b/portable/GCC/ARM_CM3_MPU/port.c @@ -90,6 +90,7 @@ /* Constants required to set up the initial stack. */ #define portINITIAL_XPSR ( 0x01000000 ) +#define portINITIAL_EXC_RETURN ( 0xfffffffdUL ) #define portINITIAL_CONTROL_IF_UNPRIVILEGED ( 0x03 ) #define portINITIAL_CONTROL_IF_PRIVILEGED ( 0x02 ) @@ -103,12 +104,31 @@ #define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) #define portPRIGROUP_SHIFT ( 8UL ) +/* Constants used during system call enter and exit. */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) + /* Offsets in the stack to the parameters when inside the SVC handler. */ +#define portOFFSET_TO_LR ( 5 ) #define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) + +/* Does addr lie within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /* @@ -146,7 +166,7 @@ static void prvRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIV * C portion of the SVC handler. The SVC handler is split between an asm entry * and a C wrapper for simplicity of coding and maintenance. 
*/ -static void prvSVCHandler( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION; +void vSVCHandler_C( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION; /** * @brief Checks whether or not the processor is privileged. @@ -182,6 +202,53 @@ void vResetPrivilege( void ) __attribute__( ( naked ) ); #else void vPortExitCritical( void ) PRIVILEGED_FUNCTION; #endif + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + */ + void vSystemCallEnter( uint32_t * pulTaskStack ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ +BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ /* Each task maintains its own interrupt status in the critical nesting @@ -207,34 +274,91 @@ static UBaseType_t uxCriticalNesting = 0xaaaaaaaa; StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, TaskFunction_t pxCode, void * pvParameters, - BaseType_t xRunPrivileged ) + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */ - pxTopOfStack--; - *pxTopOfStack = 0; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. */ - if( xRunPrivileged == pdTRUE ) { - *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED; + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED; } else { - *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED; + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED; } + xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */ + xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */ + xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */ + xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */ + xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. 
*/ + xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */ + xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */ + xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */ + xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ + + xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */ + xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */ + xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */ + xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */ + xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */ + xMPUSettings->ulContext[ 16 ] = 0; /* LR. */ + xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */ + xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */ + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. */ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ - return pxTopOfStack; + return &( xMPUSettings->ulContext[ 19 ] ); } /*-----------------------------------------------------------*/ -void vPortSVCHandler( void ) +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq vSystemCallEnter \n" + "cmp r2, %1 \n" + "beq vSystemCallEnter_1 \n" + "cmp r2, %2 \n" + "beq vSystemCallExit \n" + "b vSVCHandler_C \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */ { /* Assumes psp was in use. */ __asm volatile @@ -248,12 +372,14 @@ void vPortSVCHandler( void ) " mrs r0, psp \n" #endif " b %0 \n" - ::"i" ( prvSVCHandler ) : "r0", "memory" + ::"i" ( vSVCHandler_C ) : "r0", "memory" ); } + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ -static void prvSVCHandler( uint32_t * pulParam ) +void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */ { uint8_t ucSVCNumber; uint32_t ulPC; @@ -262,7 +388,7 @@ static void prvSVCHandler( uint32_t * pulParam ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being - * exported from linker scripts. */ + * exported from linker scripts. 
*/ extern uint32_t * __syscalls_flash_start__; extern uint32_t * __syscalls_flash_end__; #else @@ -296,7 +422,6 @@ static void prvSVCHandler( uint32_t * pulParam ) break; - #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) case portSVC_RAISE_PRIVILEGE: /* Only raise the privilege, if the * svc was raised from any of the @@ -325,7 +450,7 @@ static void prvSVCHandler( uint32_t * pulParam ) ::: "r1", "memory" ); break; - #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ + #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ default: /* Unknown SVC call. */ break; @@ -333,45 +458,311 @@ static void prvSVCHandler( uint32_t * pulParam ) } /*-----------------------------------------------------------*/ +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter( uint32_t * pulTaskStack ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulSystemCallLocation, i; + const uint32_t ulStackFrameSize = 8; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, #1 \n" /* Clear nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. */ + ::: "r1", "memory" + ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. 
*/ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulSystemCallLocation, i; + const uint32_t ulStackFrameSize = 8; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, #1 \n" /* Clear nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. 
*/ + ::: "r1", "memory" + ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallExit( uint32_t * pulSystemCallStack ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulSystemCallLocation, i; + const uint32_t ulStackFrameSize = 8; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " orr r1, #1 \n" /* Set nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. */ + ::: "r1", "memory" + ); + + /* Restore the stacked link register to what it was at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. 
*/ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} +/*-----------------------------------------------------------*/ + static void prvRestoreContextOfFirstTask( void ) { __asm volatile ( - " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */ - " ldr r0, [r0] \n" - " ldr r0, [r0] \n" - " msr msp, r0 \n"/* Set the msp back to the start of the stack. */ - " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */ - " ldr r1, [r3] \n" - " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */ - " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */ - " \n" - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ - " \n" - " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers. */ - " stmia r2!, {r4-r11} \n"/* Write 4 sets of MPU registers. */ - " \n" - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - " \n" - " ldmia r0!, {r3, r4-r11} \n"/* Pop the registers that are not automatically saved on exception entry. */ - " msr control, r3 \n" - " msr psp, r0 \n"/* Restore the task stack pointer. */ - " mov r0, #0 \n" - " msr basepri, r0 \n" - " ldr r14, =0xfffffffd \n"/* Load exec return code. */ - " bx r14 \n" - " \n" - " .ltorg \n"/* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */ - " .align 4 \n" + " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n" + " ldr r0, [r0] \n" + " msr msp, r0 \n" /* Set the msp back to the start of the stack. */ + " \n" + /*------------ Program MPU. ------------ */ + " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + " str r3, [r0] \n" /* Disable MPU. */ + " \n" + " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */ + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ + " \n" + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + " str r3, [r0] \n" /* Enable MPU. 
*/ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + /*---------- Restore Context. ---------- */ + " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ + " msr psp, r0 \n" + " stmia r0, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */ + " msr control, r3 \n" + " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" + " bx lr \n" + " \n" + " .ltorg \n" /* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */ + " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" ); } @@ -585,53 +976,66 @@ void xPortPendSVHandler( void ) __asm volatile ( - " mrs r0, psp \n" + " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " ldr r1, [r2] \n" /* r1 = Location where the context should be saved. */ + " \n" + /*------------ Save Context. ----------- */ + " mrs r3, control \n" + " mrs r0, psp \n" + " isb \n" " \n" - " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */ - " ldr r2, [r3] \n" + " stmia r1!, {r3-r11, lr} \n" /* Store CONTROL register, r4-r11 and LR. */ + " ldmia r0, {r4-r11} \n" /* Copy hardware saved context into r4-r11. */ + " stmia r1!, {r0, r4-r11} \n" /* Store original PSP (after hardware has saved context) and the hardware saved context. */ + " str r1, [r2] \n" /* Save the location from where the context should be restored as the first member of TCB. */ " \n" - " mrs r1, control \n" - " stmdb r0!, {r1, r4-r11} \n"/* Save the remaining registers. */ - " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */ + /*---------- Select next task. --------- */ + " mov r0, %0 \n" + " msr basepri, r0 \n" + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" " \n" - " stmdb sp!, {r3, r14} \n" - " mov r0, %0 \n" - " msr basepri, r0 \n" - " dsb \n" - " isb \n" - " bl vTaskSwitchContext \n" - " mov r0, #0 \n" - " msr basepri, r0 \n" - " ldmia sp!, {r3, r14} \n" - " \n"/* Restore the context. */ - " ldr r1, [r3] \n" - " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */ - " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */ + /*------------ Program MPU. ------------ */ + " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */ " \n" - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + " str r3, [r0] \n" /* Disable MPU. */ " \n" - " ldr r2, =0xe000ed9c \n"/* Region Base Address register. 
*/ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers. */ - " stmia r2!, {r4-r11} \n"/* Write 4 sets of MPU registers. */ + " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */ + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ " \n" - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + " str r3, [r0] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ " \n" - " ldmia r0!, {r3, r4-r11} \n"/* Pop the registers that are not automatically saved on exception entry. */ - " msr control, r3 \n" + /*---------- Restore Context. ---------- */ + " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */ " \n" - " msr psp, r0 \n" - " bx r14 \n" + " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ + " msr psp, r0 \n" + " stmia r0, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */ + " msr control, r3 \n" " \n" - " .ltorg \n"/* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */ - " .align 4 \n" + " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .ltorg \n" /* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */ + " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); @@ -816,11 +1220,19 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); + /* Invalidate user configurable regions. 
*/ for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } } else @@ -843,6 +1255,11 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) | ( portMPU_REGION_CACHEABLE_BUFFERABLE ) | ( portMPU_REGION_ENABLE ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) + + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); } lIndex = 0; @@ -863,12 +1280,28 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) | ( xRegions[ lIndex ].ulParameters ) | ( portMPU_REGION_ENABLE ); + + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL ); + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; + if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) || + ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION; + } + if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } } else { /* Invalidate the region. */ xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } lIndex++; @@ -877,6 +1310,47 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } /*-----------------------------------------------------------*/ +BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + +{ + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. 
*/ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + + return xAccessGranted; +} +/*-----------------------------------------------------------*/ + #if ( configASSERT_DEFINED == 1 ) void vPortValidateInterruptPriority( void ) @@ -936,4 +1410,4 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } #endif /* configASSERT_DEFINED */ -/*-----------------------------------------------------------*/ \ No newline at end of file +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM3_MPU/portmacro.h b/portable/GCC/ARM_CM3_MPU/portmacro.h index e73447bf7e1..d1f659e3789 100644 --- a/portable/GCC/ARM_CM3_MPU/portmacro.h +++ b/portable/GCC/ARM_CM3_MPU/portmacro.h @@ -104,10 +104,45 @@ uint32_t ulRegionAttribute; } xMPU_REGION_REGISTERS; -/* Plus 1 to create space for the stack region. */ + typedef struct MPU_REGION_SETTINGS + { + uint32_t ulRegionStartAddress; + uint32_t ulRegionEndAddress; + uint32_t ulRegionPermissions; + } xMPU_REGION_SETTINGS; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + #define MAX_CONTEXT_SIZE 20 + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + typedef struct MPU_SETTINGS { xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ]; + xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ]; + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif } xMPU_SETTINGS; /* Architecture specifics. */ @@ -118,9 +153,12 @@ /*-----------------------------------------------------------*/ /* SVC numbers for various services. */ - #define portSVC_START_SCHEDULER 0 - #define portSVC_YIELD 1 - #define portSVC_RAISE_PRIVILEGE 2 + #define portSVC_START_SCHEDULER 0 + #define portSVC_YIELD 1 + #define portSVC_RAISE_PRIVILEGE 2 + #define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with upto 4 parameters. */ + #define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. 
*/ + #define portSVC_SYSTEM_CALL_EXIT 5 /* Scheduler utilities. */ @@ -232,6 +270,16 @@ #define portRESET_PRIVILEGE() vResetPrivilege() /*-----------------------------------------------------------*/ + extern BaseType_t xPortIsTaskPrivileged( void ); + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() +/*-----------------------------------------------------------*/ + portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..df9239a41d6 --- /dev/null +++ b/portable/GCC/ARM_CM4_MPU/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: \n" + " pop {r0} \n" + " b 
MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSuspendImpl \n" + " \n" + " push 
{r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTaskGetNameImpl \n" + " svc 
%1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE 
MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
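Note on the wrapper template: every MPU_* entry point added in mpu_wrappers_v2_asm.c, above and below this point, uses the same naked-function sequence — preserve r0, read CONTROL, test the nPRIV bit, then either tail-call the privileged MPU_*Impl routine directly (privileged caller) or bracket the call with the portSVC_SYSTEM_CALL_ENTER and portSVC_SYSTEM_CALL_EXIT SVCs (unprivileged caller). The wrappers have to stay in naked assembly so that r0-r3 and the stacked arguments still hold the caller's parameters when the Impl function and the SVC handlers run. The fragment below is only a minimal C sketch of that decision, not code from the patch: MPU_xExampleApi and MPU_xExampleApiImpl are hypothetical names, portIS_PRIVILEGED() stands in for the CONTROL-register test performed by the assembly, and the SVC bracketing is described in comments because it has no portable C equivalent.

    #include "FreeRTOS.h"
    #include "task.h"

    BaseType_t MPU_xExampleApiImpl( void * pvParam );   /* Hypothetical privileged implementation. */

    BaseType_t MPU_xExampleApi( void * pvParam )        /* Hypothetical wrapper, for illustration only. */
    {
        BaseType_t xReturn;

        if( portIS_PRIVILEGED() == pdTRUE )
        {
            /* Privileged caller: the real wrapper tail-calls the Impl
             * function directly ("b MPU_xExampleApiImpl"), with no SVC
             * overhead. */
            xReturn = MPU_xExampleApiImpl( pvParam );
        }
        else
        {
            /* Unprivileged caller: the real wrapper issues
             * "svc portSVC_SYSTEM_CALL_ENTER" (privilege raised, execution
             * moved to the task's system call stack), calls the Impl
             * function, then issues "svc portSVC_SYSTEM_CALL_EXIT"
             * (privilege dropped, task stack restored) before returning. */
            xReturn = MPU_xExampleApiImpl( pvParam );
        }

        return xReturn;
    }

The five-parameter APIs in this file (xTaskGenericNotify and xTaskGenericNotifyWait above, xTimerGenericCommand below) use portSVC_SYSTEM_CALL_ENTER_1 instead of portSVC_SYSTEM_CALL_ENTER, matching the two SVC numbers added to portmacro.h earlier in this patch.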
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
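From the application's point of view these wrappers are transparent: when the MPU wrappers are enabled, mpu_wrappers.h maps the public API names onto the MPU_* entry points, so an unprivileged task keeps calling the normal queue API and the privilege raise/drop shown above happens underneath it. A short usage sketch follows; it is not part of the patch — vUnprivilegedReceiverTask is a hypothetical task and xQueue is assumed to have been created elsewhere with uint32_t-sized items.

    #include "FreeRTOS.h"
    #include "queue.h"

    extern QueueHandle_t xQueue;    /* Assumed created elsewhere, e.g. with xQueueCreate(). */

    void vUnprivilegedReceiverTask( void * pvParameters )
    {
        uint32_t ulReceivedValue;

        ( void ) pvParameters;

        for( ;; )
        {
            /* Resolves to MPU_xQueueReceive() when the MPU wrappers are in use. */
            if( xQueueReceive( xQueue, &ulReceivedValue, pdMS_TO_TICKS( 100 ) ) == pdPASS )
            {
                /* For an unprivileged caller, privilege was raised only for
                 * the duration of MPU_xQueueReceiveImpl() and has already
                 * been dropped again by the time execution reaches here. */
            }
        }
    }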
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
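/*
 * A minimal C model of the dispatch pattern shared by every MPU_* wrapper in
 * this file, added here purely as a readability aid. Each wrapper reads the
 * CONTROL register and tests the nPRIV bit: a privileged caller branches
 * straight to the corresponding MPU_...Impl function, while an unprivileged
 * caller brackets that call with the portSVC_SYSTEM_CALL_ENTER and
 * portSVC_SYSTEM_CALL_EXIT SVCs so that vSystemCallEnter()/vSystemCallExit()
 * in port.c can raise privilege and switch to the per-task system call stack
 * on the way in, and undo both on the way out. (MPU_xTimerGenericCommand
 * additionally checks IPSR so that calls made from interrupt context always
 * take the privileged path, and the 5-parameter calls use
 * portSVC_SYSTEM_CALL_ENTER_1 instead.) The real wrappers must remain naked
 * assembly because they may not touch the stack or clobber the argument
 * registers before the SVC is taken; ulReadControl() and vRaiseSvc() below
 * are hypothetical stand-ins for the "mrs rX, control" and "svc" instructions,
 * not kernel APIs, and the FreeRTOS types are assumed to come from the
 * enclosing file's includes.
 */
extern uint32_t ulReadControl( void );          /* Stand-in for "mrs rX, control". */
extern void vRaiseSvc( uint32_t ulSvcNumber );  /* Stand-in for "svc <number>". */
extern UBaseType_t MPU_uxEventGroupGetNumberImpl( void * xEventGroup ); /* Mirrors the wrapper's signature. */

UBaseType_t MPU_uxEventGroupGetNumberModel( void * xEventGroup )
{
    UBaseType_t uxReturn;

    if( ( ulReadControl() & 0x1UL ) == 0UL )
    {
        /* nPRIV clear - the caller is already privileged, so call the
         * implementation directly (the "b MPU_uxEventGroupGetNumberImpl"
         * path in the assembly above). */
        uxReturn = MPU_uxEventGroupGetNumberImpl( xEventGroup );
    }
    else
    {
        /* nPRIV set - enter the system call (privilege raised, per-task
         * system call stack installed), run the implementation, then exit
         * the system call (privilege dropped, task stack restored). */
        vRaiseSvc( portSVC_SYSTEM_CALL_ENTER );
        uxReturn = MPU_uxEventGroupGetNumberImpl( xEventGroup );
        vRaiseSvc( portSVC_SYSTEM_CALL_EXIT );
    }

    return uxReturn;
}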
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c index 8a7f4c1a4e4..bbcf733b89c 100755 --- a/portable/GCC/ARM_CM4_MPU/port.c +++ b/portable/GCC/ARM_CM4_MPU/port.c @@ -118,13 +118,35 @@ #define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) #define portPRIGROUP_SHIFT ( 8UL ) +/* Constants used during system call enter and exit. */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) + /* Offsets in the stack to the parameters when inside the SVC handler. */ +#define portOFFSET_TO_LR ( 5 ) #define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) + /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) +/* Does addr lie within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) +/*-----------------------------------------------------------*/ + /* * Configure a number of standard MPU regions that are used by all tasks. */ @@ -160,7 +182,7 @@ static void prvRestoreContextOfFirstTask( void ) __attribute__( ( naked ) ) PRIV * C portion of the SVC handler. The SVC handler is split between an asm entry * and a C wrapper for simplicity of coding and maintenance. 
*/ -static void prvSVCHandler( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION; +void vSVCHandler_C( uint32_t * pulRegisters ) __attribute__( ( noinline ) ) PRIVILEGED_FUNCTION; /* * Function to enable the VFP. @@ -201,6 +223,56 @@ void vResetPrivilege( void ) __attribute__( ( naked ) ); #else void vPortExitCritical( void ) PRIVILEGED_FUNCTION; #endif + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ +BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ /* Each task maintains its own interrupt status in the critical nesting @@ -227,39 +299,102 @@ static UBaseType_t uxCriticalNesting = 0xaaaaaaaa; StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, TaskFunction_t pxCode, void * pvParameters, - BaseType_t xRunPrivileged ) + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */ - pxTopOfStack--; - *pxTopOfStack = 0; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - - /* A save method is being used that requires each task to maintain its - * own exec return value. */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; - - pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. 
*/ - if( xRunPrivileged == pdTRUE ) { - *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED; + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED; } else { - *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED; + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED; + } + xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */ + xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */ + xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */ + xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */ + xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. */ + xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */ + xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */ + xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */ + xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ + + xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */ + xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */ + xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */ + xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */ + xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */ + xMPUSettings->ulContext[ 16 ] = 0; /* LR. */ + xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */ + xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */ + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. */ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; } + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ - return pxTopOfStack; + return &( xMPUSettings->ulContext[ 19 ] ); } /*-----------------------------------------------------------*/ +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vPortSVCHandler( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void vPortSVCHandler( void ) { /* Assumes psp was in use. 
*/ @@ -274,12 +409,14 @@ void vPortSVCHandler( void ) " mrs r0, psp \n" #endif " b %0 \n" - ::"i" ( prvSVCHandler ) : "r0", "memory" + ::"i" ( vSVCHandler_C ) : "r0", "memory" ); } + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ -static void prvSVCHandler( uint32_t * pulParam ) +void vSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */ { uint8_t ucSVCNumber; uint32_t ulPC; @@ -288,7 +425,7 @@ static void prvSVCHandler( uint32_t * pulParam ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being - * exported from linker scripts. */ + * exported from linker scripts. */ extern uint32_t * __syscalls_flash_start__; extern uint32_t * __syscalls_flash_end__; #else @@ -350,7 +487,7 @@ static void prvSVCHandler( uint32_t * pulParam ) ::: "r1", "memory" ); break; - #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ + #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ default: /* Unknown SVC call. */ break; @@ -358,52 +495,364 @@ static void prvSVCHandler( uint32_t * pulParam ) } /*-----------------------------------------------------------*/ +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, #1 \n" /* Clear nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. 
*/ + ::: "r1", "memory" + ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. 
*/ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, #1 \n" /* Clear nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. */ + ::: "r1", "memory" + ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " orr r1, #1 \n" /* Set nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. */ + ::: "r1", "memory" + ); + + /* Restore the stacked link register to what it was at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} +/*-----------------------------------------------------------*/ + static void prvRestoreContextOfFirstTask( void ) { __asm volatile ( - " ldr r0, =0xE000ED08 \n"/* Use the NVIC offset register to locate the stack. */ - " ldr r0, [r0] \n" - " ldr r0, [r0] \n" - " msr msp, r0 \n"/* Set the msp back to the start of the stack. */ - " ldr r3, pxCurrentTCBConst2 \n"/* Restore the context. */ - " ldr r1, [r3] \n" - " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */ - " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */ - " \n" - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ - " \n" - " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ - " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ - " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ - " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ - #endif /* configTOTAL_MPU_REGIONS == 16. */ - " \n" - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. 
*/ - " \n" - " ldmia r0!, {r3-r11, r14} \n"/* Pop the registers that are not automatically saved on exception entry. */ - " msr control, r3 \n" - " msr psp, r0 \n"/* Restore the task stack pointer. */ - " mov r0, #0 \n" - " msr basepri, r0 \n" - " bx r14 \n" - " \n" - " .ltorg \n"/* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */ - " .align 4 \n" - "pxCurrentTCBConst2: .word pxCurrentTCB \n" + " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */ + " ldr r0, [r0] \n" + " ldr r0, [r0] \n" + " msr msp, r0 \n" /* Set the msp back to the start of the stack. */ + " \n" + /*------------ Program MPU. ------------ */ + " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + " str r3, [r0] \n" /* Disable MPU. */ + " \n" + " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */ + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ + #endif /* configTOTAL_MPU_REGIONS == 16. */ + " \n" + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + " str r3, [r0] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + /*---------- Restore Context. ---------- */ + " ldr r3, pxCurrentTCBConst2 \n" /* r3 = pxCurrentTCBConst2. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ + " msr psp, r0 \n" + " stmia r0, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */ + " msr control, r3 \n" + " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" + " bx lr \n" + " \n" + " .ltorg \n" /* Assemble current literal pool to avoid offset-out-of-bound errors with lto. */ + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB\n" ); } /*-----------------------------------------------------------*/ @@ -639,76 +1088,94 @@ void xPortPendSVHandler( void ) __asm volatile ( - " mrs r0, psp \n" - " isb \n" + " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " ldr r1, [r2] \n" /* r1 = Location where the context should be saved. */ " \n" - " ldr r3, pxCurrentTCBConst \n"/* Get the location of the current TCB. */ - " ldr r2, [r3] \n" + /*------------ Save Context. 
----------- */ + " mrs r3, control \n" + " mrs r0, psp \n" + " isb \n" " \n" - " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, push high vfp registers. */ - " it eq \n" - " vstmdbeq r0!, {s16-s31} \n" + " add r0, r0, #0x20 \n" /* Move r0 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r0, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r0, r0, #0x20 \n" /* Set r0 back to the location of hardware saved context. */ " \n" - " mrs r1, control \n" - " stmdb r0!, {r1, r4-r11, r14} \n"/* Save the remaining registers. */ - " str r0, [r2] \n"/* Save the new top of stack into the first member of the TCB. */ + " stmia r1!, {r3-r11, lr} \n" /* Store CONTROL register, r4-r11 and LR. */ + " ldmia r0, {r4-r11} \n" /* Copy hardware saved context into r4-r11. */ + " stmia r1!, {r0, r4-r11} \n" /* Store original PSP (after hardware has saved context) and the hardware saved context. */ + " str r1, [r2] \n" /* Save the location from where the context should be restored as the first member of TCB. */ " \n" - " stmdb sp!, {r0, r3} \n" - " mov r0, %0 \n" - #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) - " cpsid i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ - #endif - " msr basepri, r0 \n" - " dsb \n" - " isb \n" - #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) - " cpsie i \n"/* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ - #endif - " bl vTaskSwitchContext \n" - " mov r0, #0 \n" - " msr basepri, r0 \n" - " ldmia sp!, {r0, r3} \n" - " \n"/* Restore the context. */ - " ldr r1, [r3] \n" - " ldr r0, [r1] \n"/* The first item in the TCB is the task top of stack. */ - " add r1, r1, #4 \n"/* Move onto the second item in the TCB... */ + /*---------- Select next task. --------- */ + " mov r0, %0 \n" + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + " cpsid i \n" /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + #endif + " msr basepri, r0 \n" + " dsb \n" + " isb \n" + #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + " cpsie i \n" /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ + #endif + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" " \n" - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r3, #1 \n"/* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - " str r3, [r2] \n"/* Disable MPU. */ + /*------------ Program MPU. ------------ */ + " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " add r2, r2, #4 \n" /* r2 = Second item in the TCB which is xMPUSettings. */ " \n" - " ldr r2, =0xe000ed9c \n"/* Region Base Address register. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ - " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " bic r3, #1 \n" /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + " str r3, [r0] \n" /* Disable MPU. */ " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ - " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. 
[MPU Region # 8 - 11]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ - " stmia r2, {r4-r11} \n"/* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ - #endif /* configTOTAL_MPU_REGIONS == 16. */ + " ldr r0, =0xe000ed9c \n" /* Region Base Address register. */ + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ " \n" - " ldr r2, =0xe000ed94 \n"/* MPU_CTRL register. */ - " ldr r3, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r3, #1 \n"/* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - " str r3, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ + #if ( configTOTAL_MPU_REGIONS == 16 ) + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ + " ldmia r2!, {r4-r11} \n" /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ + " stmia r0, {r4-r11} \n" /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ + #endif /* configTOTAL_MPU_REGIONS == 16. */ " \n" - " ldmia r0!, {r3-r11, r14} \n"/* Pop the registers that are not automatically saved on exception entry. */ - " msr control, r3 \n" + " ldr r0, =0xe000ed94 \n" /* MPU_CTRL register. */ + " ldr r3, [r0] \n" /* Read the value of MPU_CTRL. */ + " orr r3, #1 \n" /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + " str r3, [r0] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ " \n" - " tst r14, #0x10 \n"/* Is the task using the FPU context? If so, pop the high vfp registers too. */ - " it eq \n" - " vldmiaeq r0!, {s16-s31} \n" + /*---------- Restore Context. ---------- */ + " ldr r3, pxCurrentTCBConst \n" /* r3 = pxCurrentTCBConst. */ + " ldr r2, [r3] \n" /* r2 = pxCurrentTCB. */ + " ldr r1, [r2] \n" /* r1 = Location of saved context in TCB. */ " \n" - " msr psp, r0 \n" - " bx r14 \n" + " ldmdb r1!, {r0, r4-r11} \n" /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ + " msr psp, r0 \n" + " stmia r0!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r3-r11, lr} \n" /* r3 contains CONTROL register. r4-r11 and LR restored. */ + " msr control, r3 \n" + + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r0!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */ + + " str r1, [r2] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" " \n" - " .ltorg \n"/* Assemble the current literal pool to avoid offset-out-of-bound errors with lto. */ - " .align 4 \n" - "pxCurrentTCBConst: .word pxCurrentTCB \n" + " .ltorg \n" /* Assemble the current literal pool to avoid offset-out-of-bound errors with lto. 
*/ + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } @@ -939,11 +1406,19 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); + /* Invalidate user configurable regions. */ for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } } else @@ -966,6 +1441,12 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | ( portMPU_REGION_ENABLE ); + + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) + + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); } lIndex = 0; @@ -986,12 +1467,28 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) | ( xRegions[ lIndex ].ulParameters ) | ( portMPU_REGION_ENABLE ); + + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL ); + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; + if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) || + ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION; + } + if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } } else { /* Invalidate the region. 
*/ xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } lIndex++; @@ -1000,6 +1497,47 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } /*-----------------------------------------------------------*/ +BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + +{ + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + + return xAccessGranted; +} +/*-----------------------------------------------------------*/ + #if ( configASSERT_DEFINED == 1 ) void vPortValidateInterruptPriority( void ) @@ -1059,4 +1597,4 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } #endif /* configASSERT_DEFINED */ -/*-----------------------------------------------------------*/ \ No newline at end of file +/*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM4_MPU/portmacro.h b/portable/GCC/ARM_CM4_MPU/portmacro.h index f3365d14371..5417feaefd2 100644 --- a/portable/GCC/ARM_CM4_MPU/portmacro.h +++ b/portable/GCC/ARM_CM4_MPU/portmacro.h @@ -193,9 +193,45 @@ typedef struct MPU_REGION_REGISTERS uint32_t ulRegionAttribute; } xMPU_REGION_REGISTERS; +typedef struct MPU_REGION_SETTINGS +{ + uint32_t ulRegionStartAddress; + uint32_t ulRegionEndAddress; + uint32_t ulRegionPermissions; +} xMPU_REGION_SETTINGS; + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + +#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + +#define MAX_CONTEXT_SIZE 52 + +/* Flags used for xMPU_SETTINGS.ulTaskFlags member. 
*/ +#define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) +#define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + typedef struct MPU_SETTINGS { xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ]; + xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ]; + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif } xMPU_SETTINGS; /* Architecture specifics. */ @@ -206,9 +242,12 @@ typedef struct MPU_SETTINGS /*-----------------------------------------------------------*/ /* SVC numbers for various services. */ -#define portSVC_START_SCHEDULER 0 -#define portSVC_YIELD 1 -#define portSVC_RAISE_PRIVILEGE 2 +#define portSVC_START_SCHEDULER 0 +#define portSVC_YIELD 1 +#define portSVC_RAISE_PRIVILEGE 2 +#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 5 /* Scheduler utilities. */ @@ -320,6 +359,16 @@ extern void vResetPrivilege( void ); #define portRESET_PRIVILEGE() vResetPrivilege() /*-----------------------------------------------------------*/ +extern BaseType_t xPortIsTaskPrivileged( void ); + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ +#define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() +/*-----------------------------------------------------------*/ + portFORCE_INLINE static BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM55/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
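
Every wrapper in this file follows the same dispatch shape: read the CONTROL register, branch straight to the corresponding *Impl function when the caller is already privileged, and otherwise bracket the call between SVC #portSVC_SYSTEM_CALL_ENTER (or #portSVC_SYSTEM_CALL_ENTER_1 for the calls that take five parameters) and SVC #portSVC_SYSTEM_CALL_EXIT, so that the SVC handler can raise privilege, switch to the task's dedicated system call stack, and later undo both. The C sketch below restates that control flow for readability only; MPU_xExampleApi, MPU_xExampleApiImpl and prvCallImplThroughSvc are hypothetical names, and the real wrappers have to remain naked assembly so the compiler does not emit a prologue that disturbs the argument registers before the SVC is raised.

    #include "FreeRTOS.h"

    /* Hypothetical implementation function, plus a hypothetical helper that
     * stands in for the SVC #portSVC_SYSTEM_CALL_ENTER /
     * #portSVC_SYSTEM_CALL_EXIT pair issued by the real wrappers. */
    extern BaseType_t MPU_xExampleApiImpl( void );
    extern BaseType_t prvCallImplThroughSvc( BaseType_t ( * pxImpl )( void ) );

    static inline uint32_t prvReadControl( void )
    {
        uint32_t ulControl;

        /* The same read the wrappers perform with "mrs r0, control". */
        __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

        return ulControl;
    }

    BaseType_t MPU_xExampleApi( void ) /* Hypothetical wrapper, illustration only. */
    {
        BaseType_t xReturn;

        if( ( prvReadControl() & 0x1UL ) == 0UL )
        {
            /* CONTROL.nPRIV is 0 - the caller is already privileged, so go
             * straight to the implementation (the "_Priv" path above). */
            xReturn = MPU_xExampleApiImpl();
        }
        else
        {
            /* CONTROL.nPRIV is 1 - unprivileged caller.  The real wrappers
             * issue SVC #portSVC_SYSTEM_CALL_ENTER here, call the
             * implementation, then issue SVC #portSVC_SYSTEM_CALL_EXIT; that
             * pair is folded into the hypothetical helper for this sketch. */
            xReturn = prvCallImplThroughSvc( MPU_xExampleApiImpl );
        }

        return xReturn;
    }
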
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
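
Application code does not call these MPU_ functions directly. Assuming configENABLE_MPU is 1 and configUSE_MPU_WRAPPERS_V1 is 0, mpu_wrappers.h maps the public API names onto these entry points, so an unprivileged task that calls xQueueReceive() lands in MPU_xQueueReceive() above and takes the SVC path automatically. The sketch below is a minimal, hypothetical usage example; the queue handle, function names and queue sizing are invented for illustration, and it assumes dynamic allocation is enabled in FreeRTOSConfig.h.

    #include "FreeRTOS.h"
    #include "task.h"
    #include "queue.h"

    static QueueHandle_t xExampleQueue = NULL; /* Hypothetical queue. */

    void vCreateExampleQueue( void ) /* Hypothetical - intended to run from privileged initialisation code. */
    {
        /* xQueueCreate() resolves to the MPU wrapper for xQueueGenericCreate(). */
        xExampleQueue = xQueueCreate( 4, sizeof( uint32_t ) );
    }

    void vUnprivilegedQueueTask( void * pvParameters ) /* Hypothetical unprivileged task. */
    {
        uint32_t ulReceivedValue = 0;
        uint32_t ulMessageCount = 0;

        ( void ) pvParameters;

        for( ; ; )
        {
            /* Resolves to MPU_xQueueReceive() above.  Because the task runs
             * unprivileged, the wrapper enters through
             * SVC #portSVC_SYSTEM_CALL_ENTER and returns through
             * SVC #portSVC_SYSTEM_CALL_EXIT. */
            if( xQueueReceive( xExampleQueue, &ulReceivedValue, portMAX_DELAY ) == pdPASS )
            {
                /* Process ulReceivedValue here. */
                ulMessageCount++;
            }
        }
    }
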
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
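[Editor's sketch, not part of the patch] Every wrapper in this file follows the same dispatch pattern: push r0, read CONTROL, test the nPRIV bit, then either branch straight to the corresponding MPU_*Impl function (privileged caller) or enter the kernel with "svc portSVC_SYSTEM_CALL_ENTER", call the Impl function on the per-task system call stack, and return through "svc portSVC_SYSTEM_CALL_EXIT" (unprivileged caller). The C rendering below is only a readability aid under stated assumptions: the name MPU_xTimerIsTimerActive_Sketch is hypothetical, and a real wrapper must remain a naked assembly function so the compiler cannot clobber r0-r3 or emit a prologue between the caller and the Impl routine. It assumes the same port headers this file already depends on, so TimerHandle_t and the portSVC_* SVC numbers resolve from timers.h and the port layer.

#include "FreeRTOS.h"
#include "timers.h"

extern BaseType_t MPU_xTimerIsTimerActiveImpl( TimerHandle_t xTimer );

/* Illustrative only - mirrors the control flow encoded by the naked assembly wrappers above. */
BaseType_t MPU_xTimerIsTimerActive_Sketch( TimerHandle_t xTimer )
{
    uint32_t ulControl;
    BaseType_t xReturn;

    /* CONTROL.nPRIV (bit 0) is 0 when the caller is already privileged. */
    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

    if( ( ulControl & 1UL ) == 0UL )
    {
        /* Privileged path - equivalent to the "b MPU_xTimerIsTimerActiveImpl" branch. */
        xReturn = MPU_xTimerIsTimerActiveImpl( xTimer );
    }
    else
    {
        /* Unprivileged path - raise privilege and switch to the system call
         * stack via SVC, run the implementation, then drop privilege again. */
        __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_ENTER ) : "memory" );
        xReturn = MPU_xTimerIsTimerActiveImpl( xTimer );
        __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
    }

    return xReturn;
}

Wrappers that pass a fifth parameter on the stack, such as MPU_xTimerGenericCommand and MPU_xEventGroupWaitBits, use portSVC_SYSTEM_CALL_ENTER_1 instead, so that the SVC handler (vSystemCallEnter_1 in port.c) also copies the stacked parameter across to the system call stack.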
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM55/non_secure/port.c b/portable/GCC/ARM_CM55/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM55/non_secure/port.c +++ b/portable/GCC/ARM_CM55/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. 
*/ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM55/non_secure/portasm.c b/portable/GCC/ARM_CM55/non_secure/portasm.c index 9f9b2e68d39..f7ec7d9c072 100644 --- a/portable/GCC/ARM_CM55/non_secure/portasm.c +++ b/portable/GCC/ARM_CM55/non_secure/portasm.c @@ -40,95 +40,120 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( - " .syntax unified \n" - " \n" - " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ - " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ - " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. 
*/ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. 
*/ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xSecureContextConst2: .word xSecureContext \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - " ldr r5, xSecureContextConst2 \n" - " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " msr control, r3 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r4 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ - " ldr r4, xSecureContextConst2 \n" - " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, [r2] \n" /* Read pxCurrentTCB. */ + " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" + " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n" /* Set this task's PSPLIM value. */ + " movs r1, #2 \n" /* r1 = 2. */ + " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n" /* Discard everything up to r0. */ + " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n" /* Finally, branch to EXC_RETURN. 
*/ " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" "xSecureContextConst2: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -236,6 +261,160 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */ + " \n" + " cbz r0, save_ns_context \n" /* No secure context to save. */ + " save_s_context: \n" + " push {r0-r2, lr} \n" + " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r2, lr} \n" + " \n" + " save_ns_context: \n" + " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */ + " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " save_general_regs: \n" + " mrs r3, psp \n" + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r2!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psp \n" /* r3 = PSP. */ + " mrs r4, psplim \n" /* r4 = PSPLIM. */ + " mrs r5, control \n" /* r5 = CONTROL. */ + " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. 
*/ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " cbz r0, restore_ns_context \n" /* No secure context to restore. */ + " \n" + " restore_s_context: \n" + " push {r1-r3, lr} \n" + " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r1-r3, lr} \n" + " \n" + " restore_ns_context: \n" + " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */ + " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " restore_general_regs: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. 
*/ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xSecureContextConst: .word xSecureContext \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -260,20 +439,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " b select_next_task \n" " \n" " save_ns_context: \n" @@ -284,26 +454,14 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " it eq \n" " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " subs r2, r2, #16 \n"/* r2 = r2 - 16. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. 
*/ - #else /* configENABLE_MPU */ - " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " \n" " select_next_task: \n" " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ @@ -318,83 +476,22 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r3] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r3] \n"/* Program MAIR0. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r3] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r3] \n"/* Program RNR = 8. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r3] \n"/* Program RNR = 12. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. 
*/ - " str r4, [r3] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #else /* configENABLE_MPU */ - " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #endif /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. 
r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" " \n" " restore_ns_context: \n" " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ @@ -409,17 +506,60 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" "xSecureContextConst: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -437,6 +577,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ diff --git a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM55/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. 
- */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. 
*/ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
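Every wrapper above raises one of two entry SVC numbers: portSVC_SYSTEM_CALL_ENTER for system calls that take at most four parameters (passed in r0-r3), and portSVC_SYSTEM_CALL_ENTER_1 for the five-parameter calls such as xTaskGenericNotify() and xTaskGenericNotifyWait(), whose fifth argument the compiler has already placed on the task stack. SVC_Handler recovers that number by reading the immediate byte of the SVC instruction that trapped: the stacked PC sits at byte offset 24 in the exception frame, and the 16-bit Thumb SVC encoding keeps its 8-bit immediate two bytes below the return address. The fragment below is only an illustrative sketch of that decode step (prvDecodeSvcNumber is a hypothetical name, not part of the patch); the real dispatch stays in the naked SVC_Handler shown earlier.

    #include <stdint.h>

    /* Hypothetical helper, for illustration only: mirrors the
     * "ldr r1, [r0, #24]" / "ldrb r2, [r1, #-2]" sequence in SVC_Handler. */
    static uint8_t prvDecodeSvcNumber( const uint32_t * pulExceptionFrame )
    {
        /* pulExceptionFrame[ 6 ] is the stacked PC (byte offset 24) in the
         * hardware-saved frame { r0, r1, r2, r3, r12, lr, pc, xPSR }. */
        const uint8_t * pucReturnAddress = ( const uint8_t * ) pulExceptionFrame[ 6 ];

        /* The Thumb SVC instruction precedes the return address and carries
         * its number in the low (first) byte. */
        return pucReturnAddress[ -2 ];
    }

The value obtained this way is then compared against portSVC_SYSTEM_CALL_ENTER, portSVC_SYSTEM_CALL_ENTER_1 and portSVC_SYSTEM_CALL_EXIT to branch to vSystemCallEnter, vSystemCallEnter_1 or vSystemCallExit, with every other number falling through to vPortSVCHandler_C.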
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
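Apart from the choice of SVC number, every naked wrapper in this file repeats the same control flow: read CONTROL, test the nPRIV bit, branch straight to the corresponding MPU_*Impl function when the caller is already privileged, and otherwise bracket the Impl call between the system call enter and exit SVCs. The rough C model below captures that flow; everything in it is illustrative, since the real privilege test and the SVC instructions can only be expressed in assembly.

/* Rough C model of the pattern every naked MPU_* wrapper in this file
 * implements in assembly.  All names and helpers here are illustrative
 * stand-ins, not kernel APIs. */
typedef long BaseTypeModel_t;

static int xCallerIsPrivileged = 0;          /* Stands in for CONTROL bit 0 (nPRIV). */

static void prvRaiseSystemCallSVC( void )    /* Models "svc portSVC_SYSTEM_CALL_ENTER". */
{
    xCallerIsPrivileged = 1;
}

static void prvExitSystemCallSVC( void )     /* Models "svc portSVC_SYSTEM_CALL_EXIT". */
{
    xCallerIsPrivileged = 0;
}

static BaseTypeModel_t MPU_xExampleImpl( void * pvParameter )
{
    ( void ) pvParameter;
    return 1;                                /* Stands in for the real kernel work. */
}

BaseTypeModel_t MPU_xExample( void * pvParameter )
{
    BaseTypeModel_t xReturn;

    if( xCallerIsPrivileged != 0 )
    {
        /* "_Priv" path in the assembly: already privileged, branch
         * straight to the Impl function with no SVC overhead. */
        xReturn = MPU_xExampleImpl( pvParameter );
    }
    else
    {
        /* "_Unpriv" path: enter the system call (raises privilege and
         * switches to the system call stack), run the Impl, then exit
         * (drops privilege and restores the task stack). */
        prvRaiseSystemCallSVC();
        xReturn = MPU_xExampleImpl( pvParameter );
        prvExitSystemCallSVC();
    }

    return xReturn;
}

The privileged path deliberately avoids the SVC round trip, which is why each wrapper tests CONTROL first instead of always trapping.
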
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
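None of these MPU_ prefixed functions is called directly by application code: mpu_wrappers.h maps the public API names onto them when portUSING_MPU_WRAPPERS is 1, so unprivileged tasks keep using the ordinary event group, queue and stream buffer calls and are routed through the wrappers transparently. A small sketch of such a task follows; the handle and bit value are illustrative only.

/* Sketch of an unprivileged task; xSyncEvents and the bit value are
 * illustrative.  The plain API names resolve to the MPU_ wrappers via
 * mpu_wrappers.h, so the privilege transition is invisible here. */
#include "FreeRTOS.h"
#include "task.h"
#include "event_groups.h"

static void prvUnprivilegedWorkerTask( void * pvParameters )
{
    EventGroupHandle_t xSyncEvents = ( EventGroupHandle_t ) pvParameters;

    for( ; ; )
    {
        /* Ends up in MPU_xEventGroupSetBits(); the caller is unprivileged,
         * so the wrapper takes the portSVC_SYSTEM_CALL_ENTER path. */
        ( void ) xEventGroupSetBits( xSyncEvents, ( EventBits_t ) 0x01 );

        vTaskDelay( pdMS_TO_TICKS( 100 ) );
    }
}
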
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. 
*/ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
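+         * The top of ulSystemCallStackBuffer is rounded down and the bottom
+         * rounded up to portBYTE_ALIGNMENT (8 bytes on this port) so that both
+         * pulSystemCallStack and pulSystemCallStackLimit are double word aligned.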
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
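+                     * Access is granted only when both the start and the end of
+                     * the buffer fall within a single enabled region that carries
+                     * the requested permissions.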
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c index a78529d04d9..504b6bf3be3 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portasm.c @@ -40,6 +40,88 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. 
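+ * The MPU is switched back on only after MAIR0 and all of the task's
+ * RBAR/RLAR pairs have been programmed.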
*/ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -50,80 +132,23 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. 
*/ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " msr control, r2 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r2 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r2 \n"/* Finally, branch to EXC_RETURN. */ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -231,6 +256,129 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */ + " mrs r2, psp \n" /* r2 = PSP. */ + " \n" + " save_general_regs: \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. 
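+ * Together with s16-s31 stored above, this places the task's entire
+ * floating point context in the TCB save area.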
*/ + " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r1!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psplim \n" /* r3 = PSPLIM. */ + " mrs r4, control \n" /* r4 = CONTROL. */ + " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. 
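+ * These come back out in the reverse of the order in which save_special_regs
+ * stored them, walking the save area downwards with ldmdb.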
*/ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -238,21 +386,16 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .syntax unified \n" " \n" " mrs r0, psp \n"/* Read PSP in r0. */ + " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r2, control \n"/* r2 = CONTROL. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ - #else /* configENABLE_MPU */ - " mrs r2, psplim \n"/* r2 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ - #endif /* configENABLE_MPU */ + " \n" + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ " \n" " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ @@ -270,52 +413,7 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. 
*/ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ - #else /* configENABLE_MPU */ - " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ - #endif /* configENABLE_MPU */ + " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -323,28 +421,66 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ " \n" - #if ( configENABLE_MPU == 1 ) - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ - #else /* configENABLE_MPU */ - " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ - #endif /* configENABLE_MPU */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ " msr psp, r0 \n"/* Remember the new top of stack for the task. 
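 * The exception return through r3 (EXC_RETURN) then unstacks the
 * remaining registers from this PSP.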
*/ " bx r3 \n" " \n" " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -362,4 +498,6 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. 
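+ *
+ * Holds the dedicated stack used while a task executes a system call,
+ * together with the task stack pointer, LR and PSPLIM values captured at
+ * system call entry so that vSystemCallExit can restore them.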
+ */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM85/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
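+ *
+ * Each MPU_xxx wrapper defined in this file reads the CONTROL register: a
+ * privileged caller branches straight to the corresponding MPU_xxxImpl
+ * function, while an unprivileged caller enters and exits the call through
+ * the portSVC_SYSTEM_CALL_ENTER and portSVC_SYSTEM_CALL_EXIT SVCs.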
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM85/non_secure/port.c b/portable/GCC/ARM_CM85/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM85/non_secure/port.c +++ b/portable/GCC/ARM_CM85/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. 
*/ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
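   That is, the hardware-stacked exception frame (r0-r3, r12, LR, PC and xPSR,
   plus the FP registers when an extended frame was stacked) is duplicated from
   the task stack onto the system call stack so that the rest of the system
   call executes on the privileged stack. As a rough equivalent, shown only for
   clarity:

       memcpy( pulSystemCallStack, pulTaskStack,
               ulStackFrameSize * sizeof( uint32_t ) );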
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
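   It is set on system call entry and cleared again in vSystemCallExit, so the
   assert that follows guards against a second system call being entered
   before the previous one has exited.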
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
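   Bit 0 of CONTROL is nPRIV: clearing it here makes thread mode privileged
   when the SVC returns, and vSystemCallExit sets it again to drop back to
   unprivileged. Using CMSIS-style intrinsics purely as an illustration, the
   two operations would read:

       __set_CONTROL( __get_CONTROL() & ~0x1UL );   // raise privilege (entry)
       __set_CONTROL( __get_CONTROL() | 0x1UL );    // drop privilege (exit)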
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
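   The buffer itself has no particular alignment, so the code below rounds the
   top of the buffer down, and the bottom up, to the 8-byte boundary required
   by the Cortex-M ports (portBYTE_ALIGNMENT is 8 here). Purely as an example,
   if the last buffer entry happened to sit at 0x20000F14, masking with
   ~portBYTE_ALIGNMENT_MASK would yield 0x20000F10 as the initial system call
   stack pointer.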
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
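   Access is granted only when a single enabled region covers the whole buffer
   with the requested permissions. A sketch of the test performed below, with
   ulRegionStart and ulRegionEnd as illustrative names:

       ulRegionStart = portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR );
       ulRegionEnd   = portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR );

   Both ulBufferStartAddress and ulBufferEndAddress must lie within
   [ ulRegionStart, ulRegionEnd ], and prvGetRegionAccessPermissions() for that
   region must include every bit of ulAccessRequested.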
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM85/non_secure/portasm.c b/portable/GCC/ARM_CM85/non_secure/portasm.c index 9f9b2e68d39..f7ec7d9c072 100644 --- a/portable/GCC/ARM_CM85/non_secure/portasm.c +++ b/portable/GCC/ARM_CM85/non_secure/portasm.c @@ -40,95 +40,120 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile ( - " .syntax unified \n" - " \n" - " ldr r2, pxCurrentTCBConst2 \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r3, [r2] \n"/* Read pxCurrentTCB. */ - " ldr r0, [r3] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ - " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - " ldr r4, [r3] \n"/* r4 = *r3 i.e. r4 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r2] \n"/* Program RNR = 4. */ - " adds r3, #4 \n"/* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. 
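   The surrounding sequence is: disable the MPU (dmb, then clear bit 0 of
   MPU_CTRL), load this task's MAIR0, select region 4 through RNR and write
   four RBAR/RLAR pairs at once through the alias registers (repeated for
   regions 8 and 12 when 16 regions are configured), then set bit 0 of
   MPU_CTRL again and issue a dsb so the new regions are in force before the
   task context is restored.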
*/ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r3!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 set of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r3, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst2 \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. 
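   With the v2 MPU wrappers the task context no longer lives on the task
   stack: it is kept in the ulContext array inside xMPU_SETTINGS, and the
   first word of the TCB (normally the top-of-stack pointer) is reused as a
   cursor into that array. stmia walks the cursor forward while saving and
   ldmdb walks it backward while restoring, which is why the address written
   back here is where the next context save will begin.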
*/ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xSecureContextConst2: .word xSecureContext \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r4} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - " ldr r5, xSecureContextConst2 \n" - " str r1, [r5] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " msr control, r3 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r4 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ - " ldr r4, xSecureContextConst2 \n" - " str r1, [r4] \n"/* Set xSecureContext to this task's value for the same. */ - " msr psplim, r2 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r3, [r2] \n" /* Read pxCurrentTCB. */ + " ldr r0, [r3] \n" /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" + " ldm r0!, {r1-r3} \n" /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ + " ldr r4, xSecureContextConst2 \n" + " str r1, [r4] \n" /* Set xSecureContext to this task's value for the same. */ + " msr psplim, r2 \n" /* Set this task's PSPLIM value. */ + " movs r1, #2 \n" /* r1 = 2. */ + " msr CONTROL, r1 \n" /* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n" /* Discard everything up to r0. */ + " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx r3 \n" /* Finally, branch to EXC_RETURN. 
*/ " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" "xSecureContextConst2: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -236,6 +261,160 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern SecureContext_SaveContext \n" + " .extern SecureContext_LoadContext \n" + " \n" + " ldr r3, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " ldr r0, [r3] \n" /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + " ldr r2, [r1] \n" /* r2 = Location in TCB where the context should be saved. */ + " \n" + " cbz r0, save_ns_context \n" /* No secure context to save. */ + " save_s_context: \n" + " push {r0-r2, lr} \n" + " bl SecureContext_SaveContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r0-r2, lr} \n" + " \n" + " save_ns_context: \n" + " mov r3, lr \n" /* r3 = LR (EXC_RETURN). */ + " lsls r3, r3, #25 \n" /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi save_special_regs \n" /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " save_general_regs: \n" + " mrs r3, psp \n" + " \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r3, r3, #0x20 \n" /* Move r3 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r2!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r3, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r2!, {s0-s16} \n" /* Store hardware saved FP context. */ + " sub r3, r3, #0x20 \n" /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r2!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r3, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r2!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psp \n" /* r3 = PSP. */ + " mrs r4, psplim \n" /* r4 = PSPLIM. */ + " mrs r5, control \n" /* r5 = CONTROL. */ + " stmia r2!, {r0, r3-r5, lr} \n" /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r2, [r1] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. 
*/ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r3] \n" /* r0 = pxCurrentTCB.*/ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r3, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n" /* r1 = pxCurrentTCB.*/ + " ldr r2, [r1] \n" /* r2 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r2!, {r0, r3-r5, lr} \n" /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + " msr psp, r3 \n" + " msr psplim, r4 \n" + " msr control, r5 \n" + " ldr r4, xSecureContextConst \n" /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r4] \n" /* Restore xSecureContext. */ + " cbz r0, restore_ns_context \n" /* No secure context to restore. */ + " \n" + " restore_s_context: \n" + " push {r1-r3, lr} \n" + " bl SecureContext_LoadContext \n" /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r1-r3, lr} \n" + " \n" + " restore_ns_context: \n" + " mov r0, lr \n" /* r0 = LR (EXC_RETURN). */ + " lsls r0, r0, #25 \n" /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bmi restore_context_done \n" /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + " \n" + " restore_general_regs: \n" + " ldmdb r2!, {r4-r11} \n" /* r4-r11 contain hardware saved context. 
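   Two eight-word blocks are read back here: this first ldmdb fetches the
   stored copy of the hardware-stacked frame (r0-r3, r12, LR, PC, xPSR) and
   the stmia that follows writes it onto the task stack at PSP so that the
   exception return can unstack it, while the second ldmdb reloads the
   callee-saved r4-r11 into the registers themselves.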
*/ + " stmia r3!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r2!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r2!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r3!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r2!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r2, [r1] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xSecureContextConst: .word xSecureContext \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -260,20 +439,11 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " \n" " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r3] \n"/* Read pxCurrentTCB.*/ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #16 \n"/* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ - " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #12 \n"/* Make space for xSecureContext, PSPLIM and LR on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " b select_next_task \n" " \n" " save_ns_context: \n" @@ -284,26 +454,14 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " it eq \n" " vstmdbeq r2!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " subs r2, r2, #48 \n"/* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #16 \n"/* r2 = r2 + 16. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r3, control \n"/* r3 = CONTROL. */ - " mov r4, lr \n"/* r4 = LR/EXC_RETURN. */ - " subs r2, r2, #16 \n"/* r2 = r2 - 16. */ - " stmia r2!, {r0, r1, r3, r4} \n"/* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. 
*/ - #else /* configENABLE_MPU */ - " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ - " str r2, [r1] \n"/* Save the new top of stack in TCB. */ - " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ - " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ - " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ + " subs r2, r2, #44 \n"/* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ + " str r2, [r1] \n"/* Save the new top of stack in TCB. */ + " adds r2, r2, #12 \n"/* r2 = r2 + 12. */ + " stm r2, {r4-r11} \n"/* Store the registers that are not saved automatically. */ + " mrs r1, psplim \n"/* r1 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " subs r2, r2, #12 \n"/* r2 = r2 - 12. */ + " stmia r2!, {r0, r1, r3} \n"/* Store xSecureContext, PSPLIM and LR on the stack. */ " \n" " select_next_task: \n" " mov r0, %0 \n"/* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ @@ -318,83 +476,22 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ " ldr r2, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r3] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r4, [r1] \n"/* r4 = *r1 i.e. r4 = MAIR0. */ - " ldr r3, xMAIR0Const \n"/* r3 = 0xe000edc0 [Location of MAIR0]. */ - " str r4, [r3] \n"/* Program MAIR0. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #4 \n"/* r4 = 4. */ - " str r4, [r3] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #8 \n"/* r4 = 8. */ - " str r4, [r3] \n"/* Program RNR = 8. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r3, xRNRConst \n"/* r3 = 0xe000ed98 [Location of RNR]. */ - " movs r4, #12 \n"/* r4 = 12. */ - " str r4, [r3] \n"/* Program RNR = 12. */ - " ldr r3, xRBARConst \n"/* r3 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r3!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r3, xMPUCTRLConst \n"/* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r3] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. 
*/ - " str r4, [r3] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r2!, {r0, r1, r3, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r3 \n"/* Restore the CONTROL register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #else /* configENABLE_MPU */ - " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " mov lr, r4 \n"/* LR = r4. */ - " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ - " str r0, [r3] \n"/* Restore the task's xSecureContext. */ - " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ - " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ - " push {r2, r4} \n" - " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - " pop {r2, r4} \n" - " mov lr, r4 \n"/* LR = r4. */ - " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - " msr psp, r2 \n"/* Remember the new top of stack for the task. */ - " bx lr \n" - #endif /* configENABLE_MPU */ + " ldmia r2!, {r0, r1, r4} \n"/* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ + " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ + " mov lr, r4 \n"/* LR = r4. */ + " ldr r3, xSecureContextConst \n"/* Read the location of xSecureContext i.e. &( xSecureContext ). */ + " str r0, [r3] \n"/* Restore the task's xSecureContext. */ + " cbz r0, restore_ns_context \n"/* If there is no secure context for the task, restore the non-secure context. */ + " ldr r3, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r1, [r3] \n"/* Read pxCurrentTCB. */ + " push {r2, r4} \n" + " bl SecureContext_LoadContext \n"/* Restore the secure context. Params are in r0 and r1. 
r0 = xSecureContext and r1 = pxCurrentTCB. */ + " pop {r2, r4} \n" + " mov lr, r4 \n"/* LR = r4. */ + " lsls r1, r4, #25 \n"/* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + " bpl restore_ns_context \n"/* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ + " msr psp, r2 \n"/* Remember the new top of stack for the task. */ + " bx lr \n" " \n" " restore_ns_context: \n" " ldmia r2!, {r4-r11} \n"/* Restore the registers that are not automatically restored. */ @@ -409,17 +506,60 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" "xSecureContextConst: .word xSecureContext \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -437,6 +577,8 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ void vPortAllocateSecureContext( uint32_t ulSecureStackSize ) /* __attribute__ (( naked )) */ diff --git a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM85/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. 
- */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. 
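   portSTACK_FRAME_HAS_PADDING_FLAG records whether the hardware inserted an
   aligner word when stacking the exception frame (mirrored from bit 9 of the
   stacked xPSR), and portTASK_IS_PRIVILEGED_FLAG records whether the task was
   created privileged. For reference, each MAX_CONTEXT_SIZE value above is
   simply the sum of the column widths in the corresponding layout, for
   example 16 + 16 + 8 + 8 + 5 + 1 = 54 words when FPU/MVE and TrustZone are
   both enabled.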
*/ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c b/portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..6e2043427fe --- /dev/null +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.c @@ -0,0 +1,2349 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskDelayUntilImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskDelayUntil_Unpriv \n" + " MPU_xTaskDelayUntil_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskDelayUntilImpl \n" + " MPU_xTaskDelayUntil_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskDelayUntilImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskAbortDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskAbortDelay_Unpriv \n" + " MPU_xTaskAbortDelay_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskAbortDelayImpl \n" + " MPU_xTaskAbortDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskAbortDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskDelayImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskDelay_Unpriv \n" + " MPU_vTaskDelay_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskDelayImpl \n" + " MPU_vTaskDelay_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskDelayImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskPriorityGetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskPriorityGet_Unpriv \n" + " MPU_uxTaskPriorityGet_Priv: 
\n" + " pop {r0} \n" + " b MPU_uxTaskPriorityGetImpl \n" + " MPU_uxTaskPriorityGet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskPriorityGetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_eTaskGetStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_eTaskGetState_Unpriv \n" + " MPU_eTaskGetState_Priv: \n" + " pop {r0} \n" + " b MPU_eTaskGetStateImpl \n" + " MPU_eTaskGetState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_eTaskGetStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskGetInfoImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskGetInfo_Unpriv \n" + " MPU_vTaskGetInfo_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskGetInfoImpl \n" + " MPU_vTaskGetInfo_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskGetInfoImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetIdleTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetIdleTaskHandle_Unpriv \n" + " MPU_xTaskGetIdleTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetIdleTaskHandleImpl \n" + " MPU_xTaskGetIdleTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetIdleTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern 
MPU_vTaskSuspendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSuspend_Unpriv \n" + " MPU_vTaskSuspend_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSuspendImpl \n" + " MPU_vTaskSuspend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSuspendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskResumeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskResume_Unpriv \n" + " MPU_vTaskResume_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskResumeImpl \n" + " MPU_vTaskResume_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskResumeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTaskGetTickCount( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetTickCountImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetTickCount_Unpriv \n" + " MPU_xTaskGetTickCount_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetTickCountImpl \n" + " MPU_xTaskGetTickCount_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetTickCountImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetNumberOfTasksImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetNumberOfTasks_Unpriv \n" + " MPU_uxTaskGetNumberOfTasks_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetNumberOfTasksImpl \n" + " MPU_uxTaskGetNumberOfTasks_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetNumberOfTasksImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTaskGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTaskGetName_Unpriv \n" + " MPU_pcTaskGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTaskGetNameImpl \n" + " MPU_pcTaskGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 
\n" + " bl MPU_pcTaskGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimeCounterImpl \n" + " MPU_ulTaskGetRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetRunTimePercent_Unpriv \n" + " MPU_ulTaskGetRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetRunTimePercentImpl \n" + " MPU_ulTaskGetRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimePercentImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimePercent_Unpriv \n" + " MPU_ulTaskGetIdleRunTimePercent_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimePercentImpl \n" + " MPU_ulTaskGetIdleRunTimePercent_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimePercentImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + 
+configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv \n" + " MPU_ulTaskGetIdleRunTimeCounter_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " MPU_ulTaskGetIdleRunTimeCounter_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGetIdleRunTimeCounterImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetApplicationTaskTag_Unpriv \n" + " MPU_vTaskSetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetApplicationTaskTagImpl \n" + " MPU_vTaskSetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetApplicationTaskTagImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetApplicationTaskTag_Unpriv \n" + " MPU_xTaskGetApplicationTaskTag_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetApplicationTaskTagImpl \n" + " MPU_xTaskGetApplicationTaskTag_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetApplicationTaskTagImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv \n" + " 
MPU_vTaskSetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " MPU_vTaskSetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Priv: \n" + " pop {r0} \n" + " b MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTaskGetThreadLocalStoragePointerImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetSystemStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetSystemState_Unpriv \n" + " MPU_uxTaskGetSystemState_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetSystemStateImpl \n" + " MPU_uxTaskGetSystemState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetSystemStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMarkImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMarkImpl \n" + " MPU_uxTaskGetStackHighWaterMark_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMarkImpl \n" + " svc %1 \n" + " bx lr \n" 
+ " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTaskGetStackHighWaterMark2Impl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxTaskGetStackHighWaterMark2_Unpriv \n" + " MPU_uxTaskGetStackHighWaterMark2_Priv: \n" + " pop {r0} \n" + " b MPU_uxTaskGetStackHighWaterMark2Impl \n" + " MPU_uxTaskGetStackHighWaterMark2_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTaskGetStackHighWaterMark2Impl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetCurrentTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetCurrentTaskHandle_Unpriv \n" + " MPU_xTaskGetCurrentTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetCurrentTaskHandleImpl \n" + " MPU_xTaskGetCurrentTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetCurrentTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGetSchedulerState( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGetSchedulerStateImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGetSchedulerState_Unpriv \n" + " MPU_xTaskGetSchedulerState_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGetSchedulerStateImpl \n" + " MPU_xTaskGetSchedulerState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGetSchedulerStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTaskSetTimeOutStateImpl \n" + " \n" + 
" push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTaskSetTimeOutState_Unpriv \n" + " MPU_vTaskSetTimeOutState_Priv: \n" + " pop {r0} \n" + " b MPU_vTaskSetTimeOutStateImpl \n" + " MPU_vTaskSetTimeOutState_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTaskSetTimeOutStateImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskCheckForTimeOutImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskCheckForTimeOut_Unpriv \n" + " MPU_xTaskCheckForTimeOut_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskCheckForTimeOutImpl \n" + " MPU_xTaskCheckForTimeOut_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskCheckForTimeOutImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotify_Unpriv \n" + " MPU_xTaskGenericNotify_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyImpl \n" + " MPU_xTaskGenericNotify_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyWaitImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyWait_Unpriv \n" + " MPU_xTaskGenericNotifyWait_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyWaitImpl \n" + " MPU_xTaskGenericNotifyWait_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTaskGenericNotifyWaitImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyTake_Unpriv \n" + " MPU_ulTaskGenericNotifyTake_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyTakeImpl \n" + " MPU_ulTaskGenericNotifyTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTaskGenericNotifyStateClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTaskGenericNotifyStateClear_Unpriv \n" + " MPU_xTaskGenericNotifyStateClear_Priv: \n" + " pop {r0} \n" + " b MPU_xTaskGenericNotifyStateClearImpl \n" + " MPU_xTaskGenericNotifyStateClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTaskGenericNotifyStateClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_ulTaskGenericNotifyValueClearImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_ulTaskGenericNotifyValueClear_Unpriv \n" + " MPU_ulTaskGenericNotifyValueClear_Priv: \n" + " pop {r0} \n" + " b MPU_ulTaskGenericNotifyValueClearImpl \n" + " MPU_ulTaskGenericNotifyValueClear_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_ulTaskGenericNotifyValueClearImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + 
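Every MPU_* wrapper in this file repeats the same privilege-gating shape: read the CONTROL register, and if bit 0 (nPRIV) is clear the caller is already privileged, so the stub simply tail-calls the corresponding MPU_*Impl routine; otherwise it raises portSVC_SYSTEM_CALL_ENTER (or portSVC_SYSTEM_CALL_ENTER_1), calls the implementation, and raises portSVC_SYSTEM_CALL_EXIT. The following is only a rough C sketch of that flow; MPU_xApiImpl is a stand-in name for the per-API *Impl symbol, and the real stubs have to remain naked assembly so that the caller's r0-r3 argument registers reach the implementation untouched.

extern BaseType_t MPU_xApiImpl( void ); /* Placeholder for the per-API *Impl routine. */

/* Illustrative sketch only; the shipped wrappers are the naked assembly stubs above. */
static BaseType_t MPU_xApi_Sketch( void )
{
    uint32_t ulControl;
    BaseType_t xReturn;

    /* CONTROL.nPRIV (bit 0) is 0 when the caller is privileged. */
    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

    if( ( ulControl & 1UL ) == 0UL )
    {
        /* Already privileged - call the implementation directly. */
        xReturn = MPU_xApiImpl();
    }
    else
    {
        /* Unprivileged - SVC into the kernel, run the implementation on the
         * per-task system call stack, then SVC back out to the task stack. */
        __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_ENTER ) : "memory" );
        xReturn = MPU_xApiImpl();
        __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
    }

    return xReturn;
}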
+BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGenericSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGenericSend_Unpriv \n" + " MPU_xQueueGenericSend_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGenericSendImpl \n" + " MPU_xQueueGenericSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGenericSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueMessagesWaitingImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueMessagesWaiting_Unpriv \n" + " MPU_uxQueueMessagesWaiting_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueMessagesWaitingImpl \n" + " MPU_uxQueueMessagesWaiting_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueMessagesWaitingImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxQueueSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxQueueSpacesAvailable_Unpriv \n" + " MPU_uxQueueSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_uxQueueSpacesAvailableImpl \n" + " MPU_uxQueueSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxQueueSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueReceive_Unpriv \n" + " MPU_xQueueReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueReceiveImpl \n" + " MPU_xQueueReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} 
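For a sense of how these queue wrappers are reached, the sketch below shows a hypothetical unprivileged task; because mpu_wrappers.h redefines the public API names when the MPU is enabled, the xQueueReceive() call resolves to MPU_xQueueReceive() above and so enters the kernel through the SVC path. The task and queue here are illustrative only.

/* Hypothetical unprivileged task, for illustration only. */
static void prvUnprivilegedConsumerTask( void * pvParameters )
{
    QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;
    uint32_t ulReceivedValue;

    for( ; ; )
    {
        /* Resolves to MPU_xQueueReceive() when the MPU wrappers are in use. */
        if( xQueueReceive( xQueue, &( ulReceivedValue ), portMAX_DELAY ) == pdPASS )
        {
            /* Process ulReceivedValue. */
        }
    }
}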
+/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueuePeekImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueuePeek_Unpriv \n" + " MPU_xQueuePeek_Priv: \n" + " pop {r0} \n" + " b MPU_xQueuePeekImpl \n" + " MPU_xQueuePeek_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueuePeekImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSemaphoreTakeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSemaphoreTake_Unpriv \n" + " MPU_xQueueSemaphoreTake_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSemaphoreTakeImpl \n" + " MPU_xQueueSemaphoreTake_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSemaphoreTakeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGetMutexHolderImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGetMutexHolder_Unpriv \n" + " MPU_xQueueGetMutexHolder_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGetMutexHolderImpl \n" + " MPU_xQueueGetMutexHolder_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGetMutexHolderImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueTakeMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueTakeMutexRecursive_Unpriv \n" + " MPU_xQueueTakeMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueTakeMutexRecursiveImpl \n" + " MPU_xQueueTakeMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc 
%0 \n" + " bl MPU_xQueueTakeMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueGiveMutexRecursiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueGiveMutexRecursive_Unpriv \n" + " MPU_xQueueGiveMutexRecursive_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueGiveMutexRecursiveImpl \n" + " MPU_xQueueGiveMutexRecursive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueGiveMutexRecursiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueSelectFromSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueSelectFromSet_Unpriv \n" + " MPU_xQueueSelectFromSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueSelectFromSetImpl \n" + " MPU_xQueueSelectFromSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueSelectFromSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xQueueAddToSetImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xQueueAddToSet_Unpriv \n" + " MPU_xQueueAddToSet_Priv: \n" + " pop {r0} \n" + " b MPU_xQueueAddToSetImpl \n" + " MPU_xQueueAddToSet_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xQueueAddToSetImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* __attribute__ (( naked )) 
FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueAddToRegistryImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueAddToRegistry_Unpriv \n" + " MPU_vQueueAddToRegistry_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueAddToRegistryImpl \n" + " MPU_vQueueAddToRegistry_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueAddToRegistryImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vQueueUnregisterQueueImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vQueueUnregisterQueue_Unpriv \n" + " MPU_vQueueUnregisterQueue_Priv: \n" + " pop {r0} \n" + " b MPU_vQueueUnregisterQueueImpl \n" + " MPU_vQueueUnregisterQueue_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vQueueUnregisterQueueImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcQueueGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcQueueGetName_Unpriv \n" + " MPU_pcQueueGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcQueueGetNameImpl \n" + " MPU_pcQueueGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcQueueGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pvTimerGetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pvTimerGetTimerID_Unpriv \n" + " MPU_pvTimerGetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_pvTimerGetTimerIDImpl \n" + " MPU_pvTimerGetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pvTimerGetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void 
MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetTimerIDImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetTimerID_Unpriv \n" + " MPU_vTimerSetTimerID_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetTimerIDImpl \n" + " MPU_vTimerSetTimerID_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetTimerIDImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerIsTimerActiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerIsTimerActive_Unpriv \n" + " MPU_xTimerIsTimerActive_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerIsTimerActiveImpl \n" + " MPU_xTimerIsTimerActive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerIsTimerActiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetTimerDaemonTaskHandleImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGenericCommandImpl \n" + " \n" + " push {r0} \n" + " mrs r0, ipsr \n" + " cmp r0, #0 \n" + " bne MPU_xTimerGenericCommand_Priv \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " beq MPU_xTimerGenericCommand_Priv \n" + " MPU_xTimerGenericCommand_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl 
MPU_xTimerGenericCommandImpl \n" + " svc %1 \n" + " bx lr \n" + " MPU_xTimerGenericCommand_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGenericCommandImpl \n" + " \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_pcTimerGetNameImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_pcTimerGetName_Unpriv \n" + " MPU_pcTimerGetName_Priv: \n" + " pop {r0} \n" + " b MPU_pcTimerGetNameImpl \n" + " MPU_pcTimerGetName_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_pcTimerGetNameImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vTimerSetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vTimerSetReloadMode_Unpriv \n" + " MPU_vTimerSetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_vTimerSetReloadModeImpl \n" + " MPU_vTimerSetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vTimerSetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetReloadMode_Unpriv \n" + " MPU_xTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetReloadModeImpl \n" + " MPU_xTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxTimerGetReloadModeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_uxTimerGetReloadMode_Unpriv \n" + " MPU_uxTimerGetReloadMode_Priv: \n" + " pop {r0} \n" + " b MPU_uxTimerGetReloadModeImpl \n" + " MPU_uxTimerGetReloadMode_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxTimerGetReloadModeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetPeriodImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetPeriod_Unpriv \n" + " MPU_xTimerGetPeriod_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetPeriodImpl \n" + " MPU_xTimerGetPeriod_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetPeriodImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xTimerGetExpiryTimeImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xTimerGetExpiryTime_Unpriv \n" + " MPU_xTimerGetExpiryTime_Priv: \n" + " pop {r0} \n" + " b MPU_xTimerGetExpiryTimeImpl \n" + " MPU_xTimerGetExpiryTime_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xTimerGetExpiryTimeImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupWaitBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupWaitBits_Unpriv \n" + " MPU_xEventGroupWaitBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupWaitBitsImpl \n" + " MPU_xEventGroupWaitBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupWaitBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t 
MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupClearBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupClearBits_Unpriv \n" + " MPU_xEventGroupClearBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupClearBitsImpl \n" + " MPU_xEventGroupClearBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupClearBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSetBitsImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSetBits_Unpriv \n" + " MPU_xEventGroupSetBits_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSetBitsImpl \n" + " MPU_xEventGroupSetBits_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSetBitsImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xEventGroupSyncImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xEventGroupSync_Unpriv \n" + " MPU_xEventGroupSync_Priv: \n" + " pop {r0} \n" + " b MPU_xEventGroupSyncImpl \n" + " MPU_xEventGroupSync_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xEventGroupSyncImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_uxEventGroupGetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_uxEventGroupGetNumber_Unpriv \n" + " MPU_uxEventGroupGetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_uxEventGroupGetNumberImpl \n" + " MPU_uxEventGroupGetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_uxEventGroupGetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ 
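Every MPU_* wrapper in this file follows the same privilege-check trampoline; the per-API bodies above and below differ only in the Impl symbol they reference and, for calls that pass a fifth parameter on the stack (see MPU_xEventGroupWaitBits above), in using portSVC_SYSTEM_CALL_ENTER_1 rather than portSVC_SYSTEM_CALL_ENTER. The sketch below shows that shared shape once, for readability only - the wrapper name MPU_xExampleCall and its Impl are hypothetical placeholders and are not part of the patch:

void MPU_xExampleCall( void ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */
{
    __asm volatile
    (
        " .syntax unified                 \n"
        " .extern MPU_xExampleCallImpl    \n"
        "                                 \n"
        " push {r0}                       \n" /* Preserve r0 while CONTROL is examined. */
        " mrs r0, control                 \n" /* r0 = CONTROL. */
        " tst r0, #1                      \n" /* Test the nPRIV bit. */
        " bne MPU_xExampleCall_Unpriv     \n" /* nPRIV set - the caller is unprivileged. */
        " MPU_xExampleCall_Priv:          \n"
        "     pop {r0}                    \n" /* Restore the caller's r0 (first argument). */
        "     b MPU_xExampleCallImpl      \n" /* Privileged caller - branch straight to the implementation. */
        " MPU_xExampleCall_Unpriv:        \n"
        "     pop {r0}                    \n"
        "     svc %0                      \n" /* System call entry - switch to the system call stack and raise privilege. */
        "     bl MPU_xExampleCallImpl     \n" /* Run the implementation while privileged. */
        "     svc %1                      \n" /* System call exit - restore the task stack and drop privilege. */
        "     bx lr                       \n"
        "                                 \n"
        : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory"
    );
}

Wrappers whose underlying API takes five parameters use portSVC_SYSTEM_CALL_ENTER_1 so that the corresponding SVC handler path (vSystemCallEnter_1 in the port layer added by this patch) also copies the stacked fifth argument onto the system call stack.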
+/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_vEventGroupSetNumberImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_vEventGroupSetNumber_Unpriv \n" + " MPU_vEventGroupSetNumber_Priv: \n" + " pop {r0} \n" + " b MPU_vEventGroupSetNumberImpl \n" + " MPU_vEventGroupSetNumber_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_vEventGroupSetNumberImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSendImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSend_Unpriv \n" + " MPU_xStreamBufferSend_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSendImpl \n" + " MPU_xStreamBufferSend_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSendImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferReceiveImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferReceive_Unpriv \n" + " MPU_xStreamBufferReceive_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferReceiveImpl \n" + " MPU_xStreamBufferReceive_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferReceiveImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsFullImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsFull_Unpriv \n" + " MPU_xStreamBufferIsFull_Priv: \n" + " pop {r0} \n" + " 
b MPU_xStreamBufferIsFullImpl \n" + " MPU_xStreamBufferIsFull_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsFullImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferIsEmptyImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferIsEmpty_Unpriv \n" + " MPU_xStreamBufferIsEmpty_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferIsEmptyImpl \n" + " MPU_xStreamBufferIsEmpty_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferIsEmptyImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSpacesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferSpacesAvailable_Unpriv \n" + " MPU_xStreamBufferSpacesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSpacesAvailableImpl \n" + " MPU_xStreamBufferSpacesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSpacesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferBytesAvailableImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferBytesAvailable_Unpriv \n" + " MPU_xStreamBufferBytesAvailable_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferBytesAvailableImpl \n" + " MPU_xStreamBufferBytesAvailable_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferBytesAvailableImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferSetTriggerLevelImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne 
MPU_xStreamBufferSetTriggerLevel_Unpriv \n" + " MPU_xStreamBufferSetTriggerLevel_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferSetTriggerLevelImpl \n" + " MPU_xStreamBufferSetTriggerLevel_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferSetTriggerLevelImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) __attribute__ (( naked )) FREERTOS_SYSTEM_CALL; + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* __attribute__ (( naked )) FREERTOS_SYSTEM_CALL */ +{ + __asm volatile + ( + " .syntax unified \n" + " .extern MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " \n" + " push {r0} \n" + " mrs r0, control \n" + " tst r0, #1 \n" + " bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv \n" + " MPU_xStreamBufferNextMessageLengthBytes_Priv: \n" + " pop {r0} \n" + " b MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " MPU_xStreamBufferNextMessageLengthBytes_Unpriv: \n" + " pop {r0} \n" + " svc %0 \n" + " bl MPU_xStreamBufferNextMessageLengthBytesImpl \n" + " svc %1 \n" + " bx lr \n" + " \n" + : : "i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" + ); +} +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/port.c +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. 
*/ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c b/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c index a78529d04d9..504b6bf3be3 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portasm.c @@ -40,6 +40,88 @@ * header files. */ #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +#if ( configENABLE_MPU == 1 ) + +void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " program_mpu_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const2 \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst2 \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst2 \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst2 \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. 
*/ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context_first_task: \n" + " ldr r2, pxCurrentTCBConst2 \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs_first_task: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs_first_task: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + " \n" + " restore_context_done_first_task: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " mov r0, #0 \n" + " msr basepri, r0 \n" /* Ensure that interrupts are enabled when the first task starts. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst2: .word pxCurrentTCB \n" + " xMPUCTRLConst2: .word 0xe000ed94 \n" + " xMAIR0Const2: .word 0xe000edc0 \n" + " xRNRConst2: .word 0xe000ed98 \n" + " xRBARConst2: .word 0xe000ed9c \n" + ); +} + +#else /* configENABLE_MPU */ + void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -50,80 +132,23 @@ void vRestoreContextOfFirstTask( void ) /* __attribute__ (( naked )) PRIVILEGED_ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const2 \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst2 \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst2 \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 set of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. 
*/ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst2 \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldm r0!, {r1-r3} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " msr control, r2 \n"/* Set this task's CONTROL value. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r3 \n"/* Finally, branch to EXC_RETURN. */ - #else /* configENABLE_MPU */ - " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ - " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ - " movs r1, #2 \n"/* r1 = 2. */ - " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ - " adds r0, #32 \n"/* Discard everything up to r0. */ - " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ - " isb \n" - " mov r0, #0 \n" - " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ - " bx r2 \n"/* Finally, branch to EXC_RETURN. */ - #endif /* configENABLE_MPU */ + " ldm r0!, {r1-r2} \n"/* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ + " msr psplim, r1 \n"/* Set this task's PSPLIM value. */ + " movs r1, #2 \n"/* r1 = 2. */ + " msr CONTROL, r1 \n"/* Switch to use PSP in the thread mode. */ + " adds r0, #32 \n"/* Discard everything up to r0. */ + " msr psp, r0 \n"/* This is now the new top of stack to use in the task. */ + " isb \n" + " mov r0, #0 \n" + " msr basepri, r0 \n"/* Ensure that interrupts are enabled when the first task starts. */ + " bx r2 \n"/* Finally, branch to EXC_RETURN. */ " \n" " .align 4 \n" "pxCurrentTCBConst2: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst2: .word 0xe000ed94 \n" - "xMAIR0Const2: .word 0xe000edc0 \n" - "xRNRConst2: .word 0xe000ed98 \n" - "xRBARConst2: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xIsPrivileged( void ) /* __attribute__ (( naked )) */ @@ -231,6 +256,129 @@ void vClearInterruptMask( __attribute__( ( unused ) ) uint32_t ulMask ) /* __att } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + " .syntax unified \n" + " \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " ldr r1, [r0] \n" /* r1 = Location in TCB where the context should be saved. */ + " mrs r2, psp \n" /* r2 = PSP. */ + " \n" + " save_general_regs: \n" + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " add r2, r2, #0x20 \n" /* Move r2 to location where s0 is saved. */ + " tst lr, #0x10 \n" + " ittt eq \n" + " vstmiaeq r1!, {s16-s31} \n" /* Store s16-s31. */ + " vldmiaeq r2, {s0-s16} \n" /* Copy hardware saved FP context into s0-s16. */ + " vstmiaeq r1!, {s0-s16} \n" /* Store hardware saved FP context. 
*/ + " sub r2, r2, #0x20 \n" /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " stmia r1!, {r4-r11} \n" /* Store r4-r11. */ + " ldmia r2, {r4-r11} \n" /* Copy the hardware saved context into r4-r11. */ + " stmia r1!, {r4-r11} \n" /* Store the hardware saved context. */ + " \n" + " save_special_regs: \n" + " mrs r3, psplim \n" /* r3 = PSPLIM. */ + " mrs r4, control \n" /* r4 = CONTROL. */ + " stmia r1!, {r2-r4, lr} \n" /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + " str r1, [r0] \n" /* Save the location from where the context should be restored as the first member of TCB. */ + " \n" + " select_next_task: \n" + " mov r0, %0 \n" /* r0 = configMAX_SYSCALL_INTERRUPT_PRIORITY */ + " msr basepri, r0 \n" /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + " dsb \n" + " isb \n" + " bl vTaskSwitchContext \n" + " mov r0, #0 \n" /* r0 = 0. */ + " msr basepri, r0 \n" /* Enable interrupts. */ + " \n" + " program_mpu: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB. */ + " \n" + " dmb \n" /* Complete outstanding transfers before disabling MPU. */ + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " bic r2, #1 \n" /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + " str r2, [r1] \n" /* Disable MPU. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + " ldr r1, [r0] \n" /* r1 = *r0 i.e. r1 = MAIR0. */ + " ldr r2, xMAIR0Const \n" /* r2 = 0xe000edc0 [Location of MAIR0]. */ + " str r1, [r2] \n" /* Program MAIR0. */ + " \n" + " adds r0, #4 \n" /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + " ldr r1, xRNRConst \n" /* r1 = 0xe000ed98 [Location of RNR]. */ + " ldr r2, xRBARConst \n" /* r2 = 0xe000ed9c [Location of RBAR]. */ + " \n" + " movs r3, #4 \n" /* r3 = 4. */ + " str r3, [r1] \n" /* Program RNR = 4. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " \n" + #if ( configTOTAL_MPU_REGIONS == 16 ) + " movs r3, #8 \n" /* r3 = 8. */ + " str r3, [r1] \n" /* Program RNR = 8. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + " movs r3, #12 \n" /* r3 = 12. */ + " str r3, [r1] \n" /* Program RNR = 12. */ + " ldmia r0!, {r4-r11} \n" /* Read 4 sets of RBAR/RLAR registers from TCB. */ + " stmia r2, {r4-r11} \n" /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + " \n" + " ldr r1, xMPUCTRLConst \n" /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + " ldr r2, [r1] \n" /* Read the value of MPU_CTRL. */ + " orr r2, #1 \n" /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + " str r2, [r1] \n" /* Enable MPU. */ + " dsb \n" /* Force memory writes before continuing. */ + " \n" + " restore_context: \n" + " ldr r2, pxCurrentTCBConst \n" /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + " ldr r0, [r2] \n" /* r0 = pxCurrentTCB.*/ + " ldr r1, [r0] \n" /* r1 = Location of saved context in TCB. */ + " \n" + " restore_special_regs: \n" + " ldmdb r1!, {r2-r4, lr} \n" /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. 
*/ + " msr psp, r2 \n" + " msr psplim, r3 \n" + " msr control, r4 \n" + " \n" + " restore_general_regs: \n" + " ldmdb r1!, {r4-r11} \n" /* r4-r11 contain hardware saved context. */ + " stmia r2!, {r4-r11} \n" /* Copy the hardware saved context on the task stack. */ + " ldmdb r1!, {r4-r11} \n" /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + " tst lr, #0x10 \n" + " ittt eq \n" + " vldmdbeq r1!, {s0-s16} \n" /* s0-s16 contain hardware saved FP context. */ + " vstmiaeq r2!, {s0-s16} \n" /* Copy hardware saved FP context on the task stack. */ + " vldmdbeq r1!, {s16-s31} \n" /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + " \n" + " restore_context_done: \n" + " str r1, [r0] \n" /* Save the location where the context should be saved next as the first member of TCB. */ + " bx lr \n" + " \n" + " .align 4 \n" + " pxCurrentTCBConst: .word pxCurrentTCB \n" + " xMPUCTRLConst: .word 0xe000ed94 \n" + " xMAIR0Const: .word 0xe000edc0 \n" + " xRNRConst: .word 0xe000ed98 \n" + " xRBARConst: .word 0xe000ed9c \n" + ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) + ); +} + +#else /* configENABLE_MPU */ + void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -238,21 +386,16 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " .syntax unified \n" " \n" " mrs r0, psp \n"/* Read PSP in r0. */ + " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst lr, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ " it eq \n" " vstmdbeq r0!, {s16-s31} \n"/* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - " mrs r1, psplim \n"/* r1 = PSPLIM. */ - " mrs r2, control \n"/* r2 = CONTROL. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r1-r11} \n"/* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ - #else /* configENABLE_MPU */ - " mrs r2, psplim \n"/* r2 = PSPLIM. */ - " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ - " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ - #endif /* configENABLE_MPU */ + " \n" + " mrs r2, psplim \n"/* r2 = PSPLIM. */ + " mov r3, lr \n"/* r3 = LR/EXC_RETURN. */ + " stmdb r0!, {r2-r11} \n"/* Store on the stack - PSPLIM, LR and registers that are not automatically saved. */ " \n" " ldr r2, pxCurrentTCBConst \n"/* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ @@ -270,52 +413,7 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " ldr r1, [r2] \n"/* Read pxCurrentTCB. */ " ldr r0, [r1] \n"/* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ " \n" - #if ( configENABLE_MPU == 1 ) - " dmb \n"/* Complete outstanding transfers before disabling MPU. */ - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " bic r4, #1 \n"/* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - " str r4, [r2] \n"/* Disable MPU. */ - " \n" - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - " ldr r3, [r1] \n"/* r3 = *r1 i.e. r3 = MAIR0. */ - " ldr r2, xMAIR0Const \n"/* r2 = 0xe000edc0 [Location of MAIR0]. */ - " str r3, [r2] \n"/* Program MAIR0. 
*/ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #4 \n"/* r3 = 4. */ - " str r3, [r2] \n"/* Program RNR = 4. */ - " adds r1, #4 \n"/* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " \n" - #if ( configTOTAL_MPU_REGIONS == 16 ) - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #8 \n"/* r3 = 8. */ - " str r3, [r2] \n"/* Program RNR = 8. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - " ldr r2, xRNRConst \n"/* r2 = 0xe000ed98 [Location of RNR]. */ - " movs r3, #12 \n"/* r3 = 12. */ - " str r3, [r2] \n"/* Program RNR = 12. */ - " ldr r2, xRBARConst \n"/* r2 = 0xe000ed9c [Location of RBAR]. */ - " ldmia r1!, {r4-r11} \n"/* Read 4 sets of RBAR/RLAR registers from TCB. */ - " stmia r2!, {r4-r11} \n"/* Write 4 set of RBAR/RLAR registers using alias registers. */ - #endif /* configTOTAL_MPU_REGIONS == 16 */ - " \n" - " ldr r2, xMPUCTRLConst \n"/* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - " ldr r4, [r2] \n"/* Read the value of MPU_CTRL. */ - " orr r4, #1 \n"/* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - " str r4, [r2] \n"/* Enable MPU. */ - " dsb \n"/* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - " \n" - #if ( configENABLE_MPU == 1 ) - " ldmia r0!, {r1-r11} \n"/* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ - #else /* configENABLE_MPU */ - " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ - #endif /* configENABLE_MPU */ + " ldmia r0!, {r2-r11} \n"/* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ " \n" #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) " tst r3, #0x10 \n"/* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -323,28 +421,66 @@ void PendSV_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ " vldmiaeq r0!, {s16-s31} \n"/* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ " \n" - #if ( configENABLE_MPU == 1 ) - " msr psplim, r1 \n"/* Restore the PSPLIM register value for the task. */ - " msr control, r2 \n"/* Restore the CONTROL register value for the task. */ - #else /* configENABLE_MPU */ - " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ - #endif /* configENABLE_MPU */ + " msr psplim, r2 \n"/* Restore the PSPLIM register value for the task. */ " msr psp, r0 \n"/* Remember the new top of stack for the task. 
*/ " bx r3 \n" " \n" " .align 4 \n" "pxCurrentTCBConst: .word pxCurrentTCB \n" - #if ( configENABLE_MPU == 1 ) - "xMPUCTRLConst: .word 0xe000ed94 \n" - "xMAIR0Const: .word 0xe000edc0 \n" - "xRNRConst: .word 0xe000ed98 \n" - "xRBARConst: .word 0xe000ed9c \n" - #endif /* configENABLE_MPU */ ::"i" ( configMAX_SYSCALL_INTERRUPT_PRIORITY ) ); } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ +{ + __asm volatile + ( + ".syntax unified \n" + ".extern vPortSVCHandler_C \n" + ".extern vSystemCallEnter \n" + ".extern vSystemCallEnter_1 \n" + ".extern vSystemCallExit \n" + " \n" + "tst lr, #4 \n" + "ite eq \n" + "mrseq r0, msp \n" + "mrsne r0, psp \n" + " \n" + "ldr r1, [r0, #24] \n" + "ldrb r2, [r1, #-2] \n" + "cmp r2, %0 \n" + "beq syscall_enter \n" + "cmp r2, %1 \n" + "beq syscall_enter_1 \n" + "cmp r2, %2 \n" + "beq syscall_exit \n" + "b vPortSVCHandler_C \n" + " \n" + "syscall_enter: \n" + " mov r1, lr \n" + " b vSystemCallEnter \n" + " \n" + "syscall_enter_1: \n" + " mov r1, lr \n" + " b vSystemCallEnter_1 \n" + " \n" + "syscall_exit: \n" + " mov r1, lr \n" + " b vSystemCallExit \n" + " \n" + : /* No outputs. */ + :"i" ( portSVC_SYSTEM_CALL_ENTER ), "i" ( portSVC_SYSTEM_CALL_ENTER_1 ), "i" ( portSVC_SYSTEM_CALL_EXIT ) + : "r0", "r1", "r2", "memory" + ); +} + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ { __asm volatile @@ -362,4 +498,6 @@ void SVC_Handler( void ) /* __attribute__ (( naked )) PRIVILEGED_FUNCTION */ "svchandler_address_const: .word vPortSVCHandler_C \n" ); } + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ diff --git a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h +++ b/portable/GCC/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. 
+ */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..867642b5e97 --- /dev/null +++ b/portable/IAR/ARM_CM23/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1623 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0, r1} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0, r1} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0, r1} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0, r1} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0, r1} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0, r1} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0, r1} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
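Note on the dispatch pattern used by every MPU_* entry point in this file: as in MPU_xTaskDelayUntil above, the wrapper reads CONTROL, tests bit 0 (nPRIV), tail-branches to the corresponding MPU_*Impl routine when the caller is already privileged, and otherwise brackets the call with the two system-call SVCs so that vSystemCallEnter() and vSystemCallExit() switch to and from the task's dedicated system call stack. The following is a minimal C sketch of that dispatch, for illustration only; ulReadControlRegister(), vRaiseSystemCallEnterSvc() and vRaiseSystemCallExitSvc() are hypothetical stand-ins for the MRS and SVC instructions issued by the real assembly.

    #include "FreeRTOS.h"
    #include "task.h"

    /* Hypothetical stand-ins for "mrs r0, control" and the two SVC instructions. */
    uint32_t ulReadControlRegister( void );
    void vRaiseSystemCallEnterSvc( void );    /* svc #portSVC_SYSTEM_CALL_ENTER */
    void vRaiseSystemCallExitSvc( void );     /* svc #portSVC_SYSTEM_CALL_EXIT  */

    /* The real implementation the wrapper forwards to. */
    BaseType_t MPU_xTaskDelayUntilImpl( TickType_t * pxPreviousWakeTime,
                                        TickType_t xTimeIncrement );

    BaseType_t MPU_xTaskDelayUntil( TickType_t * pxPreviousWakeTime,
                                    TickType_t xTimeIncrement )
    {
        BaseType_t xReturn;

        if( ( ulReadControlRegister() & 1UL ) == 0UL )
        {
            /* CONTROL.nPRIV is clear - the caller is already privileged, so the
             * implementation is reached directly (the "b MPU_xTaskDelayUntilImpl"
             * tail branch in the assembly). */
            xReturn = MPU_xTaskDelayUntilImpl( pxPreviousWakeTime, xTimeIncrement );
        }
        else
        {
            /* Unprivileged caller - vSystemCallEnter(), reached via SVC, moves
             * execution onto the task's system call stack before the
             * implementation runs, and vSystemCallExit() restores the task
             * stack on the way out. */
            vRaiseSystemCallEnterSvc();
            xReturn = MPU_xTaskDelayUntilImpl( pxPreviousWakeTime, xTimeIncrement );
            vRaiseSystemCallExitSvc();
        }

        return xReturn;
    }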
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0, r1} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0, r1} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0, r1} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0, r1} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0, r1} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0, r1} + b 
MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0, r1} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne 
MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0, r1} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0, r1} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0, r1} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0, r1} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0, r1} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0, r1} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0, r1} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0, r1} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + 
bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0, r1} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0, r1} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0, r1} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0, r1} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0, r1} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0, r1} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
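Note on the two system call SVC numbers used by the wrappers: APIs that take at most four parameters (for example MPU_xQueueGenericSend above) raise portSVC_SYSTEM_CALL_ENTER, while five-parameter APIs (MPU_xTaskGenericNotify and MPU_xTaskGenericNotifyWait above, MPU_xEventGroupWaitBits further below) raise portSVC_SYSTEM_CALL_ENTER_1. Under the AAPCS the first four arguments are passed in r0-r3, but a fifth argument is passed on the caller's stack, so vSystemCallEnter_1() also has to carry that stacked argument across to the system call stack. The prototypes below, as declared in queue.h and task.h, illustrate the split; the register annotations are added here purely for illustration.

    BaseType_t xQueueGenericSend( QueueHandle_t xQueue,               /* r0 */
                                  const void * const pvItemToQueue,   /* r1 */
                                  TickType_t xTicksToWait,            /* r2 */
                                  const BaseType_t xCopyPosition );   /* r3 - fits in registers, so portSVC_SYSTEM_CALL_ENTER. */

    BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,        /* r0 */
                                   UBaseType_t uxIndexToNotify,       /* r1 */
                                   uint32_t ulValue,                  /* r2 */
                                   eNotifyAction eAction,             /* r3 */
                                   uint32_t * pulPreviousNotificationValue ); /* On the stack, so portSVC_SYSTEM_CALL_ENTER_1. */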
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0, r1} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0, r1} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0, r1} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0, r1} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0, r1} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0, r1} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + movs r1, #1 + tst r0, r1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0, r1} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0, r1} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0, r1} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0, r1} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0, r1} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0, r1} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
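Note on MPU_xTimerGenericCommand above: unlike the other wrappers it reads IPSR before looking at CONTROL, because timer commands can also be issued from an ISR. A non-zero IPSR means the caller is an exception handler, which already executes privileged, so the wrapper skips the SVC pair and branches straight to MPU_xTimerGenericCommandImpl; only an unprivileged thread-mode caller takes the portSVC_SYSTEM_CALL_ENTER_1 / portSVC_SYSTEM_CALL_EXIT route. A small C sketch of that decision follows, for illustration only; ulReadIpsr() and ulReadControlRegister() are hypothetical stand-ins for the MRS instructions.

    #include "FreeRTOS.h"

    uint32_t ulReadIpsr( void );              /* Stand-in for "mrs r0, ipsr".    */
    uint32_t ulReadControlRegister( void );   /* Stand-in for "mrs r0, control". */

    static BaseType_t prvTimerCommandUsesPrivilegedPath( void )
    {
        /* In an ISR (IPSR != 0) or in a privileged thread (CONTROL.nPRIV == 0)
         * the implementation is called directly; otherwise the wrapper goes
         * through the system call enter/exit SVCs. */
        return ( ( ulReadIpsr() != 0UL ) ||
                 ( ( ulReadControlRegister() & 1UL ) == 0UL ) ) ? pdTRUE : pdFALSE;
    }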
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0, r1} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0, r1} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0, r1} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0, r1} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0, r1} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0, r1} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0, r1} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0, r1} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0, r1} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM23/non_secure/port.c b/portable/IAR/ARM_CM23/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM23/non_secure/port.c +++ b/portable/IAR/ARM_CM23/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM23/non_secure/portasm.s b/portable/IAR/ARM_CM23/non_secure/portasm.s index fffed8df619..648ae005010 100644 --- a/portable/IAR/ARM_CM23/non_secure/portasm.s +++ b/portable/IAR/ARM_CM23/non_secure/portasm.s @@ -33,12 +33,21 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN xSecureContext EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C EXTERN SecureContext_SaveContext EXTERN SecureContext_LoadContext +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -98,65 +107,99 @@ vPortAllocateSecureContext: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */ + movs r3, #5 /* r3 = 5. */ + str r3, [r1] /* Program RNR = 5. */ + ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */ + movs r3, #6 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 6. */ + ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. 
*/ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */ + movs r3, #7 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 7. */ + ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + subs r2, #20 + ldmia r2!, {r0, r3-r6} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + subs r2, #20 + msr psp, r3 + msr psplim, r4 + msr control, r5 + mov lr, r6 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + + restore_general_regs_first_task: + subs r2, #32 + ldmia r2!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy half of the the hardware saved context on the task stack. */ + ldmia r2!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy rest half of the the hardware saved context on the task stack. */ + subs r2, #48 + ldmia r2!, {r4-r7} /* Restore r8-r11. */ + mov r8, r4 /* r8 = r4. */ + mov r9, r5 /* r9 = r5. */ + mov r10, r6 /* r10 = r6. */ + mov r11, r7 /* r11 = r7. */ + subs r2, #32 + ldmia r2!, {r4-r7} /* Restore r4-r7. */ + subs r2, #16 + + restore_context_done_first_task: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r3, [r2] /* Read pxCurrentTCB. */ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - bics r4, r5 /* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - movs r5, #4 /* r5 = 4. */ - str r5, [r2] /* Program RNR = 4. */ - ldmia r3!, {r6,r7} /* Read first set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */ - stmia r4!, {r6,r7} /* Write first set of RBAR/RLAR registers. */ - movs r5, #5 /* r5 = 5. */ - str r5, [r2] /* Program RNR = 5. */ - ldmia r3!, {r6,r7} /* Read second set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. 
*/ - stmia r4!, {r6,r7} /* Write second set of RBAR/RLAR registers. */ - movs r5, #6 /* r5 = 6. */ - str r5, [r2] /* Program RNR = 6. */ - ldmia r3!, {r6,r7} /* Read third set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */ - stmia r4!, {r6,r7} /* Write third set of RBAR/RLAR registers. */ - movs r5, #7 /* r5 = 7. */ - str r5, [r2] /* Program RNR = 7. */ - ldmia r3!, {r6,r7} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r4, =0xe000ed9c /* r4 = 0xe000ed9c [Location of RBAR]. */ - stmia r4!, {r6,r7} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - orrs r4, r5 /* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - ldr r5, =xSecureContext - str r1, [r5] /* Set xSecureContext to this task's value for the same. */ - msr psplim, r2 /* Set this task's PSPLIM value. */ - msr control, r3 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - bx r4 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ ldr r4, =xSecureContext str r1, [r4] /* Set xSecureContext to this task's value for the same. */ @@ -167,6 +210,7 @@ vRestoreContextOfFirstTask: msr psp, r0 /* This is now the new top of stack to use in the task. */ isb bx r3 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -199,6 +243,149 @@ vClearInterruptMask: msr PRIMASK, r0 bx lr /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later.*/ + ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */ + + cbz r0, save_ns_context /* No secure context to save. */ + save_s_context: + push {r0-r2, lr} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r3} /* LR is now in r3. */ + mov lr, r3 /* Restore LR. */ + + save_ns_context: + mov r3, lr /* r3 = LR (EXC_RETURN). */ + lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + save_general_regs: + mrs r3, psp + stmia r2!, {r4-r7} /* Store r4-r7. */ + mov r4, r8 /* r4 = r8. */ + mov r5, r9 /* r5 = r9. */ + mov r6, r10 /* r6 = r10. */ + mov r7, r11 /* r7 = r11. */ + stmia r2!, {r4-r7} /* Store r8-r11. */ + ldmia r3!, {r4-r7} /* Copy half of the hardware saved context into r4-r7. 
*/ + stmia r2!, {r4-r7} /* Store the hardware saved context. */ + ldmia r3!, {r4-r7} /* Copy rest half of the hardware saved context into r4-r7. */ + stmia r2!, {r4-r7} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psp /* r3 = PSP. */ + mrs r4, psplim /* r4 = PSPLIM. */ + mrs r5, control /* r5 = CONTROL. */ + mov r6, lr /* r6 = LR. */ + stmia r2!, {r0, r3-r6} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + cpsid i + bl vTaskSwitchContext + cpsie i + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */ + movs r3, #5 /* r3 = 5. */ + str r3, [r1] /* Program RNR = 5. */ + ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */ + movs r3, #6 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 6. */ + ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */ + movs r3, #7 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 7. */ + ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs: + subs r2, #20 + ldmia r2!, {r0, r3-r6} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, r6 = LR. */ + subs r2, #20 + msr psp, r3 + msr psplim, r4 + msr control, r5 + mov lr, r6 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + cbz r0, restore_ns_context /* No secure context to restore. 
*/ + + restore_s_context: + push {r1-r3, lr} + bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r1-r4} /* LR is now in r4. */ + mov lr, r4 + + restore_ns_context: + mov r0, lr /* r0 = LR (EXC_RETURN). */ + lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + restore_general_regs: + subs r2, #32 + ldmia r2!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy half of the the hardware saved context on the task stack. */ + ldmia r2!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */ + stmia r3!, {r4-r7} /* Copy rest half of the the hardware saved context on the task stack. */ + subs r2, #48 + ldmia r2!, {r4-r7} /* Restore r8-r11. */ + mov r8, r4 /* r8 = r4. */ + mov r9, r5 /* r9 = r5. */ + mov r10, r6 /* r10 = r6. */ + mov r11, r7 /* r11 = r7. */ + subs r2, #32 + ldmia r2!, {r4-r7} /* Restore r4-r7. */ + subs r2, #16 + + restore_context_done: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ PendSV_Handler: ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ @@ -216,41 +403,18 @@ PendSV_Handler: bpl save_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ -#else /* configENABLE_MPU */ + subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ -#endif /* configENABLE_MPU */ + b select_next_task save_ns_context: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ - #if ( configENABLE_MPU == 1 ) - subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - adds r2, r2, #16 /* r2 = r2 + 16. */ - stmia r2!, {r4-r7} /* Store the low registers that are not saved automatically. */ - mov r4, r8 /* r4 = r8. */ - mov r5, r9 /* r5 = r9. */ - mov r6, r10 /* r6 = r10. */ - mov r7, r11 /* r7 = r11. */ - stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - subs r2, r2, #48 /* r2 = r2 - 48. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. 
*/ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ @@ -261,7 +425,6 @@ PendSV_Handler: mov r6, r10 /* r6 = r10. */ mov r7, r11 /* r7 = r11. */ stmia r2!, {r4-r7} /* Store the high registers that are not saved automatically. */ - #endif /* configENABLE_MPU */ select_next_task: cpsid i @@ -272,68 +435,6 @@ PendSV_Handler: ldr r1, [r3] /* Read pxCurrentTCB. */ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ - #if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - bics r4, r5 /* r4 = r4 & ~r5 i.e. Clear the bit 0 in r4. */ - str r4, [r3] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r3] /* Program MAIR0. */ - ldr r4, =0xe000ed98 /* r4 = 0xe000ed98 [Location of RNR]. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - movs r5, #4 /* r5 = 4. */ - str r5, [r4] /* Program RNR = 4. */ - ldmia r1!, {r6,r7} /* Read first set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write first set of RBAR/RLAR registers. */ - movs r5, #5 /* r5 = 5. */ - str r5, [r4] /* Program RNR = 5. */ - ldmia r1!, {r6,r7} /* Read second set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write second set of RBAR/RLAR registers. */ - movs r5, #6 /* r5 = 6. */ - str r5, [r4] /* Program RNR = 6. */ - ldmia r1!, {r6,r7} /* Read third set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write third set of RBAR/RLAR registers. */ - movs r5, #7 /* r5 = 7. */ - str r5, [r4] /* Program RNR = 7. */ - ldmia r1!, {r6,r7} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r6,r7} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - movs r5, #1 /* r5 = 1. */ - orrs r4, r5 /* r4 = r4 | r5 i.e. Set the bit 0 in r4. */ - str r4, [r3] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - - #if ( configENABLE_MPU == 1 ) - ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r3 /* Restore the CONTROL register value for the task. */ - mov lr, r4 /* LR = r4. */ - ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ - str r0, [r3] /* Restore the task's xSecureContext. */ - cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ - ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - ldr r1, [r3] /* Read pxCurrentTCB. */ - push {r2, r4} - bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - pop {r2, r4} - mov lr, r4 /* LR = r4. */ - lsls r1, r4, #25 /* r1 = r4 << 25. 
Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - msr psp, r2 /* Remember the new top of stack for the task. */ - bx lr - #else /* configENABLE_MPU */ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ msr psplim, r1 /* Restore the PSPLIM register value for the task. */ mov lr, r4 /* LR = r4. */ @@ -350,7 +451,6 @@ PendSV_Handler: bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr - #endif /* configENABLE_MPU */ restore_ns_context: adds r2, r2, #16 /* Move to the high registers. */ @@ -363,8 +463,45 @@ PendSV_Handler: subs r2, r2, #32 /* Go back to the low registers. */ ldmia r2!, {r4-r7} /* Restore the low registers that are not automatically restored. */ bx lr + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + movs r0, #4 + mov r1, lr + tst r0, r1 + beq stack_on_msp + stack_on_psp: + mrs r0, psp + b route_svc + stack_on_msp: + mrs r0, msp + b route_svc + + route_svc: + ldr r2, [r0, #24] + subs r2, #2 + ldrb r3, [r2, #0] + cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq system_call_enter + cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq system_call_enter_1 + cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq system_call_exit + b vPortSVCHandler_C + + system_call_enter: + b vSystemCallEnter + system_call_enter_1: + b vSystemCallEnter_1 + system_call_exit: + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: movs r0, #4 mov r1, lr @@ -375,6 +512,8 @@ SVC_Handler: stacking_used_msp: mrs r0, msp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortFreeSecureContext: diff --git a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM23/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. 
*/ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..867642b5e97 --- /dev/null +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1623 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0, r1} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0, r1} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0, r1} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0, r1} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0, r1} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0, r1} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0, r1} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
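
Every MPU_* entry point in this file follows the same pattern: read CONTROL, test the nPRIV bit, branch straight to the corresponding Impl routine when the caller is already privileged, and otherwise bracket the call with `svc #portSVC_SYSTEM_CALL_ENTER` / `svc #portSVC_SYSTEM_CALL_EXIT`. The sketch below renders that decision in C for MPU_xTaskAbortDelay purely as an illustration; the names carrying a "Sketch" suffix are invented for this note, and the real wrappers have to stay in assembly so the caller's r0-r3 reach the Impl function untouched and without a C stack frame.

```c
#include "FreeRTOS.h"
#include "task.h"

/* CONTROL bit 0 (nPRIV): 1 = unprivileged thread mode. */
#define sketchCONTROL_NPRIV_MASK    ( 1UL << 0UL )

/* Prototype of the real implementation the wrapper forwards to. */
extern BaseType_t MPU_xTaskAbortDelayImpl( TaskHandle_t xTask );

static inline uint32_t prvReadControlSketch( void )
{
    uint32_t ulControl;

    /* Same inline-asm style as port.c uses for psp/psplim accesses. */
    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

    return ulControl;
}

BaseType_t MPU_xTaskAbortDelaySketch( TaskHandle_t xTask )
{
    if( ( prvReadControlSketch() & sketchCONTROL_NPRIV_MASK ) == 0UL )
    {
        /* Caller already privileged - equivalent to falling through the
         * MPU_xTaskAbortDelay_Priv label and branching to the Impl. */
        return MPU_xTaskAbortDelayImpl( xTask );
    }
    else
    {
        /* Unprivileged caller. The assembly wrapper brackets this call with
         * "svc #portSVC_SYSTEM_CALL_ENTER" (switch to the per-task system
         * call stack and raise privilege) and "svc #portSVC_SYSTEM_CALL_EXIT"
         * (restore the task stack and drop privilege). */
        return MPU_xTaskAbortDelayImpl( xTask );
    }
}
```

The two SVCs are what actually move the task onto its per-task system call stack and raise and later drop privilege; that work is done by vSystemCallEnter()/vSystemCallExit() in port.c later in this patch.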
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0, r1} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0, r1} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0, r1} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0, r1} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0, r1} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0, r1} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0, r1} + b 
MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0, r1} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0, r1} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0, r1} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne 
MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0, r1} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0, r1} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0, r1} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0, r1} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0, r1} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0, r1} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0, r1} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0, r1} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0, r1} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0, r1} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0, r1} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0, r1} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + 
bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0, r1} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0, r1} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0, r1} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0, r1} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0, r1} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0, r1} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0, r1} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
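
For context on how the SVC numbers raised by these wrappers are consumed: on exception entry the hardware stacks r0-r3, r12, LR, PC and xPSR, so the handler finds the return PC at word offset 6 and reads the SVC immediate from the low byte of the 16-bit opcode two bytes before that address. This is what the `ldr r2, [r0, #24]` / `subs r2, #2` / `ldrb r3, [r2, #0]` sequence in SVC_Handler does, and what vPortSVCHandler_C does in C further down in this patch. A minimal sketch of that arithmetic follows; prvGetSVCNumberSketch() is a hypothetical helper, not part of the port.

```c
#include <stdint.h>

/* r0-r3, r12 and LR are stacked before PC, so PC is the 7th word. */
#define sketchOFFSET_TO_PC    ( 6 )

static uint8_t prvGetSVCNumberSketch( const uint32_t * pulCallerStackAddress )
{
    /* Return address pushed by the hardware on exception entry
     * (byte offset 24, hence "ldr r2, [r0, #24]" in the assembly). */
    uint32_t ulPC = pulCallerStackAddress[ sketchOFFSET_TO_PC ];

    /* The 16-bit "svc #imm" opcode sits two bytes before the return address
     * and carries the immediate (4, 5 or 6 here) in its low byte. */
    return ( ( uint8_t * ) ulPC )[ -2 ];
}
```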
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0, r1} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0, r1} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0, r1} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0, r1} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0, r1} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0, r1} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + movs r1, #1 + tst r0, r1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0, r1} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0, r1} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0, r1} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0, r1} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0, r1} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0, r1} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0, r1} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
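
MPU_xTimerGenericCommand above is the one wrapper that does more than test CONTROL: xTimerGenericCommand can also be reached from an ISR, so a non-zero IPSR forces the direct privileged branch and no SVC is raised from handler mode. The following C sketch shows that decision for illustration only, using the same assumptions as the earlier sketch; the helper names are invented for this note.

```c
#include <stdint.h>

/* CONTROL bit 0 (nPRIV): 1 = unprivileged thread mode. */
#define sketchCONTROL_NPRIV_MASK    ( 1UL << 0UL )

static inline uint32_t prvReadIpsrSketch( void )
{
    uint32_t ulIpsr;

    __asm volatile ( "mrs %0, ipsr" : "=r" ( ulIpsr ) );

    return ulIpsr;
}

static inline uint32_t prvReadControlSketch( void )
{
    uint32_t ulControl;

    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

    return ulControl;
}

/* Returns 1 when the wrapper should branch straight to the Impl. */
static inline uint32_t prvTimerCommandUsesPrivilegedPathSketch( void )
{
    if( prvReadIpsrSketch() != 0UL )
    {
        /* Handler mode, i.e. called from an ISR - take the privileged
         * path so that no SVC is raised from an interrupt. */
        return 1UL;
    }

    /* Thread mode - privileged path only when the nPRIV bit is clear. */
    return ( ( prvReadControlSketch() & sketchCONTROL_NPRIV_MASK ) == 0UL ) ? 1UL : 0UL;
}
```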
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0, r1} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0, r1} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0, r1} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0, r1} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0, r1} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0, r1} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0, r1} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0, r1} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0, r1} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0, r1} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0, r1} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0, r1} + mrs r0, control + movs r1, #1 + tst r0, r1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0, r1} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0, r1} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
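+              * With the nPRIV bit cleared the task runs privileged until vSystemCallExit
+              * sets the bit again on return from the system call.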
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
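+              * Setting the nPRIV bit returns the task to unprivileged thread mode now that
+              * the system call has completed.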
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
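+         * The top of the stack is rounded down and the stack limit is rounded up to a
+         * portBYTE_ALIGNMENT boundary, so both pointers stay within ulSystemCallStackBuffer.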
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
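+                      * Access is granted only when the whole buffer lies within an enabled region whose
+                      * permissions allow the requested access; privileged tasks were already granted access above.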
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s index 62bd3872284..8f77c4dafb1 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portasm.s @@ -32,9 +32,18 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -88,63 +97,97 @@ vResetPrivilege: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */ + movs r3, #5 /* r3 = 5. */ + str r3, [r1] /* Program RNR = 5. */ + ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */ + movs r3, #6 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 6. */ + ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. 
*/ + stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */ + movs r3, #7 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 7. */ + ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + subs r1, #16 + ldmia r1!, {r2-r5} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */ + subs r1, #16 + msr psp, r2 + msr psplim, r3 + msr control, r4 + mov lr, r5 + + restore_general_regs_first_task: + subs r1, #32 + ldmia r1!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */ + stmia r2!, {r4-r7} /* Copy half of the the hardware saved context on the task stack. */ + ldmia r1!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */ + stmia r2!, {r4-r7} /* Copy rest half of the the hardware saved context on the task stack. */ + subs r1, #48 + ldmia r1!, {r4-r7} /* Restore r8-r11. */ + mov r8, r4 /* r8 = r4. */ + mov r9, r5 /* r9 = r5. */ + mov r10, r6 /* r10 = r6. */ + mov r11, r7 /* r11 = r7. */ + subs r1, #32 + ldmia r1!, {r4-r7} /* Restore r4-r7. */ + subs r1, #16 + + restore_context_done_first_task: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - movs r4, #1 /* r4 = 1. */ - bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - str r3, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */ - movs r4, #5 /* r4 = 5. */ - str r4, [r2] /* Program RNR = 5. */ - ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */ - movs r4, #6 /* r4 = 6. */ - str r4, [r2] /* Program RNR = 6. */ - ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. 
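+                                    MPU_RLAR is the next word after MPU_RBAR, so the stmia below
+                                    programs RBAR and RLAR together.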
*/ - stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */ - movs r4, #7 /* r4 = 7. */ - str r4, [r2] /* Program RNR = 7. */ - ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - movs r4, #1 /* r4 = 1. */ - orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - str r3, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - msr psplim, r1 /* Set this task's PSPLIM value. */ - msr control, r2 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - bx r3 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ msr psplim, r1 /* Set this task's PSPLIM value. */ movs r1, #2 /* r1 = 2. */ @@ -153,6 +196,7 @@ vRestoreContextOfFirstTask: msr psp, r0 /* This is now the new top of stack to use in the task. */ isb bx r2 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -187,23 +231,127 @@ vClearInterruptMask: bx lr /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */ + mrs r2, psp /* r2 = PSP. */ + + save_general_regs: + stmia r1!, {r4-r7} /* Store r4-r7. */ + mov r4, r8 /* r4 = r8. */ + mov r5, r9 /* r5 = r9. */ + mov r6, r10 /* r6 = r10. */ + mov r7, r11 /* r7 = r11. */ + stmia r1!, {r4-r7} /* Store r8-r11. */ + ldmia r2!, {r4-r7} /* Copy half of the hardware saved context into r4-r7. */ + stmia r1!, {r4-r7} /* Store the hardware saved context. */ + ldmia r2!, {r4-r7} /* Copy rest half of the hardware saved context into r4-r7. */ + stmia r1!, {r4-r7} /* Store the hardware saved context. */ + + save_special_regs: + mrs r2, psp /* r2 = PSP. */ + mrs r3, psplim /* r3 = PSPLIM. */ + mrs r4, control /* r4 = CONTROL. */ + mov r5, lr /* r5 = LR. */ + stmia r1!, {r2-r5} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + cpsid i + bl vTaskSwitchContext + cpsie i + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + bics r2, r3 /* r2 = r2 & ~r3 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. 
r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r5} /* Read first set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write first set of RBAR/RLAR registers. */ + movs r3, #5 /* r3 = 5. */ + str r3, [r1] /* Program RNR = 5. */ + ldmia r0!, {r4-r5} /* Read second set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write second set of RBAR/RLAR registers. */ + movs r3, #6 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 6. */ + ldmia r0!, {r4-r5} /* Read third set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write third set of RBAR/RLAR registers. */ + movs r3, #7 /* r3 = 6. */ + str r3, [r1] /* Program RNR = 7. */ + ldmia r0!, {r4-r5} /* Read fourth set of RBAR/RLAR registers from TCB. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + stmia r2!, {r4-r5} /* Write fourth set of RBAR/RLAR registers. */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + movs r3, #1 /* r3 = 1. */ + orrs r2, r3 /* r2 = r2 | r3 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs: + subs r1, #16 + ldmia r1!, {r2-r5} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, r5 = LR. */ + subs r1, #16 + msr psp, r2 + msr psplim, r3 + msr control, r4 + mov lr, r5 + + restore_general_regs: + subs r1, #32 + ldmia r1!, {r4-r7} /* r4-r7 contain half of the hardware saved context. */ + stmia r2!, {r4-r7} /* Copy half of the the hardware saved context on the task stack. */ + ldmia r1!, {r4-r7} /* r4-r7 contain rest half of the hardware saved context. */ + stmia r2!, {r4-r7} /* Copy rest half of the the hardware saved context on the task stack. */ + subs r1, #48 + ldmia r1!, {r4-r7} /* Restore r8-r11. */ + mov r8, r4 /* r8 = r4. */ + mov r9, r5 /* r9 = r5. */ + mov r10, r6 /* r10 = r6. */ + mov r11, r7 /* r11 = r7. */ + subs r1, #32 + ldmia r1!, {r4-r7} /* Restore r4-r7. */ + subs r1, #16 + + restore_context_done: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r0, r0, #44 /* Make space for PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r0, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r2, control /* r2 = CONTROL. */ - mov r3, lr /* r3 = LR/EXC_RETURN. */ - stmia r0!, {r1-r7} /* Store on the stack - PSPLIM, CONTROL, LR and low registers that are not automatically saved. */ - mov r4, r8 /* r4 = r8. */ - mov r5, r9 /* r5 = r9. */ - mov r6, r10 /* r6 = r10. */ - mov r7, r11 /* r7 = r11. */ - stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. 
*/ -#else /* configENABLE_MPU */ + subs r0, r0, #40 /* Make space for PSPLIM, LR and the remaining registers on the stack. */ str r0, [r1] /* Save the new top of stack in TCB. */ mrs r2, psplim /* r2 = PSPLIM. */ @@ -214,7 +362,6 @@ PendSV_Handler: mov r6, r10 /* r6 = r10. */ mov r7, r11 /* r7 = r11. */ stmia r0!, {r4-r7} /* Store the high registers that are not saved automatically. */ -#endif /* configENABLE_MPU */ cpsid i bl vTaskSwitchContext @@ -224,63 +371,6 @@ PendSV_Handler: ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - movs r4, #1 /* r4 = 1. */ - bics r3, r4 /* r3 = r3 & ~r4 i.e. Clear the bit 0 in r3. */ - str r3, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - ldmia r1!, {r5,r6} /* Read first set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write first set of RBAR/RLAR registers. */ - movs r4, #5 /* r4 = 5. */ - str r4, [r2] /* Program RNR = 5. */ - ldmia r1!, {r5,r6} /* Read second set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write second set of RBAR/RLAR registers. */ - movs r4, #6 /* r4 = 6. */ - str r4, [r2] /* Program RNR = 6. */ - ldmia r1!, {r5,r6} /* Read third set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write third set of RBAR/RLAR registers. */ - movs r4, #7 /* r4 = 7. */ - str r4, [r2] /* Program RNR = 7. */ - ldmia r1!, {r5,r6} /* Read fourth set of RBAR/RLAR from TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - stmia r3!, {r5,r6} /* Write fourth set of RBAR/RLAR registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - movs r4, #1 /* r4 = 1. */ - orrs r3, r4 /* r3 = r3 | r4 i.e. Set the bit 0 in r3. */ - str r3, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - adds r0, r0, #28 /* Move to the high registers. */ - ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. */ - mov r8, r4 /* r8 = r4. */ - mov r9, r5 /* r9 = r5. */ - mov r10, r6 /* r10 = r6. */ - mov r11, r7 /* r11 = r7. */ - msr psp, r0 /* Remember the new top of stack for the task. */ - subs r0, r0, #44 /* Move to the starting of the saved context. */ - ldmia r0!, {r1-r7} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r7 restored. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r2 /* Restore the CONTROL register value for the task. */ - bx r3 -#else /* configENABLE_MPU */ adds r0, r0, #24 /* Move to the high registers. */ ldmia r0!, {r4-r7} /* Restore the high registers that are not automatically restored. 
*/ mov r8, r4 /* r8 = r4. */ @@ -292,9 +382,45 @@ PendSV_Handler: ldmia r0!, {r2-r7} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r7 restored. */ msr psplim, r2 /* Restore the PSPLIM register value for the task. */ bx r3 + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + movs r0, #4 + mov r1, lr + tst r0, r1 + beq stack_on_msp + stack_on_psp: + mrs r0, psp + b route_svc + stack_on_msp: + mrs r0, msp + b route_svc + + route_svc: + ldr r2, [r0, #24] + subs r2, #2 + ldrb r3, [r2, #0] + cmp r3, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq system_call_enter + cmp r3, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq system_call_enter_1 + cmp r3, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq system_call_exit + b vPortSVCHandler_C + + system_call_enter: + b vSystemCallEnter + system_call_enter_1: + b vSystemCallEnter_1 + system_call_exit: + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: movs r0, #4 mov r1, lr @@ -305,6 +431,8 @@ SVC_Handler: stacking_used_msp: mrs r0, msp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ END diff --git a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM23_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
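+     * With MPU wrappers v2 the task's saved context is kept in the ulContext member of this
+     * structure, inside the TCB, rather than on the task stack; MAX_CONTEXT_SIZE below sizes that buffer.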
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
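+                                                   The fifth parameter is passed on the task stack,
+                                                   which is why vSystemCallEnter_1 also copies it across to the system call stack.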
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM33/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b 
MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + 
MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + 
MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b 
MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl 
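+ /* The exit SVC below is handled by vSystemCallExit, which copies the
+  * exception frame back onto the task stack, restores PSP and PSPLIM, and
+  * drops privilege before returning to the caller. */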
+ svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b 
MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers 
because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl 
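+ /* Each of these default implementations, including MPU_xQueueGenericSendImpl
+  * below, simply branches to itself; the implementation from mpu_wrappers
+  * overrides the weak symbol when the corresponding API is enabled by the
+  * configuration. */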
+MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM33/non_secure/port.c b/portable/IAR/ARM_CM33/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM33/non_secure/port.c +++ b/portable/IAR/ARM_CM33/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
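+ * Evaluating this check before forming ( a + b ) lets callers such as
+ * xPortIsAuthorizedToAccessBuffer() reject buffer lengths that would wrap
+ * around the top of the 32-bit address space.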
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
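+ *
+ * @note The result reflects the task's portTASK_IS_PRIVILEGED_FLAG rather
+ * than the current value of the CONTROL register.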
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
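+ * The registers stacked by hardware on SVC entry (r0-r3, r12, LR, PC and
+ * xPSR, plus the FP context when an extended frame is in use) are duplicated
+ * onto the system call stack so that execution continues on that stack once
+ * the SVC returns.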
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
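+ * A non-NULL value here would mean that a new system call was started while
+ * a previous one was still in progress, which is not expected.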
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
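+ * Clearing the nPRIV bit leaves the task running privileged for the
+ * remainder of the system call.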
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
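+ * Setting the nPRIV bit returns the task to unprivileged execution now that
+ * the system call has completed.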
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
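+ * The top of ulSystemCallStackBuffer is rounded down, and the limit rounded
+ * up, to a portBYTE_ALIGNMENT boundary so that PSP and PSPLIM stay double
+ * word aligned while the system call stack is in use.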
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
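+ * Access is granted only when the whole buffer, from ulBufferStartAddress to
+ * ulBufferEndAddress, falls inside a single enabled region whose permissions
+ * cover the requested access.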
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM33/non_secure/portasm.s b/portable/IAR/ARM_CM33/non_secure/portasm.s index a193cd7b80e..15e74ffc16b 100644 --- a/portable/IAR/ARM_CM33/non_secure/portasm.s +++ b/portable/IAR/ARM_CM33/non_secure/portasm.s @@ -32,12 +32,21 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN xSecureContext EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C EXTERN SecureContext_SaveContext EXTERN SecureContext_LoadContext +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -89,50 +98,81 @@ vPortAllocateSecureContext: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. 
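This batch covers regions 12-15: RNR selects the first region and the RBAR/RLAR alias registers address the other three.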
*/ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + + restore_general_regs_first_task: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r3, [r2] /* Read pxCurrentTCB. */ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - ldr r5, =xSecureContext - str r1, [r5] /* Set xSecureContext to this task's value for the same. */ - msr psplim, r2 /* Set this task's PSPLIM value. */ - msr control, r3 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. 
*/ - bx r4 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ ldr r4, =xSecureContext str r1, [r4] /* Set xSecureContext to this task's value for the same. */ @@ -145,6 +185,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r3 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -183,6 +224,143 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */ + + cbz r0, save_ns_context /* No secure context to save. */ + save_s_context: + push {r0-r2, lr} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r2, lr} + + save_ns_context: + mov r3, lr /* r3 = LR (EXC_RETURN). */ + lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + save_general_regs: + mrs r3, psp + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r2!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */ + sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r2!, {r4-r11} /* Store r4-r11. */ + ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r2!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psp /* r3 = PSP. */ + mrs r4, psplim /* r4 = PSPLIM. */ + mrs r5, control /* r5 = CONTROL. */ + stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. 
r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + cbz r0, restore_ns_context /* No secure context to restore. */ + + restore_s_context: + push {r1-r3, lr} + bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r1-r3, lr} + + restore_ns_context: + mov r0, lr /* r0 = LR (EXC_RETURN). */ + lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + restore_general_regs: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. 
*/ @@ -200,20 +378,11 @@ PendSV_Handler: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ -#else /* configENABLE_MPU */ subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ -#endif /* configENABLE_MPU */ b select_next_task save_ns_context: @@ -224,17 +393,6 @@ PendSV_Handler: it eq vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - adds r2, r2, #16 /* r2 = r2 + 16. */ - stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - subs r2, r2, #16 /* r2 = r2 - 16. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ adds r2, r2, #12 /* r2 = r2 + 12. */ @@ -243,7 +401,6 @@ PendSV_Handler: mov r3, lr /* r3 = LR/EXC_RETURN. */ subs r2, r2, #12 /* r2 = r2 - 12. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ select_next_task: mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY @@ -258,51 +415,6 @@ PendSV_Handler: ldr r1, [r3] /* Read pxCurrentTCB. */ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ - #if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r3] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r3] /* Program MAIR0. */ - ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r3] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. 
*/ - str r4, [r3] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - - #if ( configENABLE_MPU == 1 ) - ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r3 /* Restore the CONTROL register value for the task. */ - mov lr, r4 /* LR = r4. */ - ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ - str r0, [r3] /* Restore the task's xSecureContext. */ - cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ - ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - ldr r1, [r3] /* Read pxCurrentTCB. */ - push {r2, r4} - bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - pop {r2, r4} - mov lr, r4 /* LR = r4. */ - lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - msr psp, r2 /* Remember the new top of stack for the task. */ - bx lr - #else /* configENABLE_MPU */ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ msr psplim, r1 /* Restore the PSPLIM register value for the task. */ mov lr, r4 /* LR = r4. */ @@ -319,7 +431,6 @@ PendSV_Handler: bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr - #endif /* configENABLE_MPU */ restore_ns_context: ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ @@ -330,14 +441,50 @@ PendSV_Handler: #endif /* configENABLE_FPU || configENABLE_MVE */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. 
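+                                            * The number compared above is read from the SVC instruction itself:
+                                            * the word at offset 24 in the stacked exception frame is the return
+                                            * PC, and the byte at PC - 2 is the immediate encoded in the SVC
+                                            * instruction that raised this exception.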
*/ + beq syscall_exit b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortFreeSecureContext: diff --git a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM33/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
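+     *
+     * When the MPU is enabled the task's saved context is kept in the
+     * ulContext member of this structure ( sized by MAX_CONTEXT_SIZE
+     * below ) rather than on the task stack, and with MPU wrappers v2
+     * each task also carries a dedicated system call stack described
+     * by xSYSTEM_CALL_STACK_INFO above.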
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
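+                                      * portSVC_SYSTEM_CALL_ENTER is raised on entry to a system call whose
+                                      * arguments fit in r0-r3, portSVC_SYSTEM_CALL_ENTER_1 on entry to one
+                                      * taking five parameters ( presumably because the fifth parameter is
+                                      * passed on the stack ), and portSVC_SYSTEM_CALL_EXIT, defined below,
+                                      * is raised when the call returns so that the port can switch back
+                                      * from the system call stack ( see xSYSTEM_CALL_STACK_INFO above ) to
+                                      * the task stack.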
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b 
MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + 
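+/* Every MPU_* wrapper in this file follows the same pattern: it reads
+ * CONTROL and tests the nPRIV bit ( bit 0 ). A privileged caller branches
+ * straight to the corresponding MPU_...Impl function, while an unprivileged
+ * caller raises portSVC_SYSTEM_CALL_ENTER ( or portSVC_SYSTEM_CALL_ENTER_1
+ * for system calls with five parameters ), calls the implementation and
+ * then raises portSVC_SYSTEM_CALL_EXIT before returning to its caller. */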
MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + 
MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b 
MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl 
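+ /* In the unprivileged path the ENTER SVC above hands control to
+  * vSystemCallEnter, which presumably raises privilege and moves
+  * execution onto the task's system call stack before the bl to the
+  * implementation runs, and the EXIT SVC below undoes that before
+  * bx lr returns to the unprivileged caller. */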
+ svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b 
MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers 
because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl 
+MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s index 581b84d4951..ec52025270b 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portasm.s @@ -32,9 +32,18 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -79,48 +88,79 @@ vResetPrivilege: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. 
*/ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs_first_task: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - msr psplim, r1 /* Set this task's PSPLIM value. */ - msr control, r2 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r3 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ msr psplim, r1 /* Set this task's PSPLIM value. */ movs r1, #2 /* r1 = 2. */ @@ -131,6 +171,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. 
*/ bx r2 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -169,6 +210,114 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */ + mrs r2, psp /* r2 = PSP. */ + + save_general_regs: + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r1!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */ + sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r1!, {r4-r11} /* Store r4-r11. */ + ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r1!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psplim /* r3 = PSPLIM. */ + mrs r4, control /* r4 = CONTROL. */ + stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. 
*/ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) @@ -176,16 +325,10 @@ PendSV_Handler: it eq vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ -#if ( configENABLE_MPU == 1 ) - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r2, control /* r2 = CONTROL. */ - mov r3, lr /* r3 = LR/EXC_RETURN. */ - stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ -#else /* configENABLE_MPU */ + mrs r2, psplim /* r2 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */ -#endif /* configENABLE_MPU */ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ @@ -203,37 +346,7 @@ PendSV_Handler: ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. 
*/ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ -#else /* configENABLE_MPU */ ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ -#endif /* configENABLE_MPU */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -241,22 +354,53 @@ PendSV_Handler: vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r2 /* Restore the CONTROL register value for the task. */ -#else /* configENABLE_MPU */ msr psplim, r2 /* Restore the PSPLIM register value for the task. */ -#endif /* configENABLE_MPU */ msr psp, r0 /* Remember the new top of stack for the task. */ bx r3 + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq syscall_exit + b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ END diff --git a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM33_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. 
*/ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM35P/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
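+ * The fifth parameter is passed on the task stack and is copied across by vSystemCallEnter_1.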
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b 
MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + 
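+    /* CONTROL bit 0 (nPRIV) is set for an unprivileged caller, so the branch above takes the _Unpriv path, which enters the kernel through SVC; a privileged caller falls through and jumps straight to the Impl function. */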
MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + 
MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b 
MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl 
+ svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b 
MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers 
because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl 
+MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM35P/non_secure/port.c b/portable/IAR/ARM_CM35P/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM35P/non_secure/port.c +++ b/portable/IAR/ARM_CM35P/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
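+ * For example, with a = 0xFFFFFFF0UL and b = 0x20UL, portUINT32_MAX - b is 0xFFFFFFDFUL; a exceeds it, so the macro evaluates to 1 because the 32-bit sum would wrap.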
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
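+ * This function backs the portIS_TASK_PRIVILEGED() macro defined in portmacrocommon.h.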
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
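+ * ulStackFrameSize is 8 words for a standard exception frame (r0-r3, r12, LR, PC, xPSR) and 26 when the extended FPU frame is in use.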
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
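+ * vSystemCallExit() resets it to NULL, so a non-NULL value here would indicate a nested system call.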
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM35P/non_secure/portasm.s b/portable/IAR/ARM_CM35P/non_secure/portasm.s index a193cd7b80e..15e74ffc16b 100644 --- a/portable/IAR/ARM_CM35P/non_secure/portasm.s +++ b/portable/IAR/ARM_CM35P/non_secure/portasm.s @@ -32,12 +32,21 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN xSecureContext EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C EXTERN SecureContext_SaveContext EXTERN SecureContext_LoadContext +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -89,50 +98,81 @@ vPortAllocateSecureContext: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. 
*/ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + + restore_general_regs_first_task: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r3, [r2] /* Read pxCurrentTCB. */ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - ldr r5, =xSecureContext - str r1, [r5] /* Set xSecureContext to this task's value for the same. */ - msr psplim, r2 /* Set this task's PSPLIM value. */ - msr control, r3 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. 
*/ - bx r4 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ ldr r4, =xSecureContext str r1, [r4] /* Set xSecureContext to this task's value for the same. */ @@ -145,6 +185,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r3 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -183,6 +224,143 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */ + + cbz r0, save_ns_context /* No secure context to save. */ + save_s_context: + push {r0-r2, lr} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r2, lr} + + save_ns_context: + mov r3, lr /* r3 = LR (EXC_RETURN). */ + lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + save_general_regs: + mrs r3, psp + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r2!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */ + sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r2!, {r4-r11} /* Store r4-r11. */ + ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r2!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psp /* r3 = PSP. */ + mrs r4, psplim /* r4 = PSPLIM. */ + mrs r5, control /* r5 = CONTROL. */ + stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. 
r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + cbz r0, restore_ns_context /* No secure context to restore. */ + + restore_s_context: + push {r1-r3, lr} + bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r1-r3, lr} + + restore_ns_context: + mov r0, lr /* r0 = LR (EXC_RETURN). */ + lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + restore_general_regs: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. 
*/ @@ -200,20 +378,11 @@ PendSV_Handler: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ -#else /* configENABLE_MPU */ subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ -#endif /* configENABLE_MPU */ b select_next_task save_ns_context: @@ -224,17 +393,6 @@ PendSV_Handler: it eq vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - adds r2, r2, #16 /* r2 = r2 + 16. */ - stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - subs r2, r2, #16 /* r2 = r2 - 16. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ adds r2, r2, #12 /* r2 = r2 + 12. */ @@ -243,7 +401,6 @@ PendSV_Handler: mov r3, lr /* r3 = LR/EXC_RETURN. */ subs r2, r2, #12 /* r2 = r2 - 12. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ select_next_task: mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY @@ -258,51 +415,6 @@ PendSV_Handler: ldr r1, [r3] /* Read pxCurrentTCB. */ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ - #if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r3] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r3] /* Program MAIR0. */ - ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r3] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. 
*/ - str r4, [r3] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - - #if ( configENABLE_MPU == 1 ) - ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r3 /* Restore the CONTROL register value for the task. */ - mov lr, r4 /* LR = r4. */ - ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ - str r0, [r3] /* Restore the task's xSecureContext. */ - cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ - ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - ldr r1, [r3] /* Read pxCurrentTCB. */ - push {r2, r4} - bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - pop {r2, r4} - mov lr, r4 /* LR = r4. */ - lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - msr psp, r2 /* Remember the new top of stack for the task. */ - bx lr - #else /* configENABLE_MPU */ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ msr psplim, r1 /* Restore the PSPLIM register value for the task. */ mov lr, r4 /* LR = r4. */ @@ -319,7 +431,6 @@ PendSV_Handler: bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr - #endif /* configENABLE_MPU */ restore_ns_context: ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ @@ -330,14 +441,50 @@ PendSV_Handler: #endif /* configENABLE_FPU || configENABLE_MVE */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. 
*/ + beq syscall_exit b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortFreeSecureContext: diff --git a/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM35P/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b 
MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + 
MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + 
MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b 
MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl 
+ svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b 
MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers 
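
The wrappers above all share one dispatch shape: save r0, read CONTROL, test the nPRIV bit, and then either branch straight to the corresponding MPU_*Impl function (privileged caller) or go through the SVC enter/exit pair (unprivileged caller). The C sketch below mirrors that shape for a single hypothetical call; ulExampleImpl, prvReadControl and ulExampleWrapper are illustrative names, not symbols from the patch, and the SVC-mediated path is only described in comments because it cannot be expressed in portable C.

#include <stdint.h>

/* Stands in for one of the MPU_*Impl weak symbols defined further below. */
extern uint32_t ulExampleImpl( uint32_t ulParam );

static inline uint32_t prvReadControl( void )
{
    uint32_t ulControl;

    /* Equivalent of the "mrs r0, control" at the top of every wrapper. */
    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

    return ulControl;
}

uint32_t ulExampleWrapper( uint32_t ulParam )
{
    if( ( prvReadControl() & 0x1UL ) == 0UL )
    {
        /* nPRIV clear - privileged caller. The assembly simply
         * tail-branches with "b MPU_..._Impl". */
        return ulExampleImpl( ulParam );
    }
    else
    {
        /* nPRIV set - unprivileged caller. The assembly raises
         * SVC #portSVC_SYSTEM_CALL_ENTER (or _ENTER_1 when a fifth
         * parameter is passed on the stack), calls MPU_..._Impl on the
         * system call stack with raised privilege, and then raises
         * SVC #portSVC_SYSTEM_CALL_EXIT to return to the task stack and
         * drop privilege again. */
        return ulExampleImpl( ulParam );
    }
}
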
because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl 
+MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
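
The RBAR/RLAR helper macros introduced above are easiest to read with concrete numbers. The following self-contained example uses made-up register values; only the masks and macro definitions are taken from the patch.

#include <assert.h>
#include <stdint.h>

/* Same definitions as introduced in the patch, repeated so that this
 * example compiles on its own. */
#define portMPU_RBAR_ADDRESS_MASK    ( 0xffffffe0 )
#define portMPU_RLAR_ADDRESS_MASK    ( 0xffffffe0 )

#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
    ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )

#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
    ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )

#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
    ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )

int main( void )
{
    /* Hypothetical region: base address 0x20000000, limit granule at
     * 0x2000ffe0. The low bits of RBAR/RLAR carry attribute and enable
     * flags, which the address macros mask away. */
    uint32_t ulRBAR = 0x20000000UL | 0x06UL; /* Base | attribute bits. */
    uint32_t ulRLAR = 0x2000ffe0UL | 0x01UL; /* Limit | region enable bit. */

    assert( portEXTRACT_FIRST_ADDRESS_FROM_RBAR( ulRBAR ) == 0x20000000UL );
    assert( portEXTRACT_LAST_ADDRESS_FROM_RLAR( ulRLAR ) == 0x2000ffffUL );
    assert( portIS_ADDRESS_WITHIN_RANGE( 0x20001000UL, 0x20000000UL, 0x2000ffffUL ) );

    return 0;
}

The last address comes out as 0x2000ffff because the RLAR limit names the final 32-byte granule, so ORing in the cleared low five bits yields the last byte covered by the region.
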
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
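
vSystemCallEnter, vSystemCallEnter_1 and vSystemCallExit above all hinge on the same step: copying the hardware-stacked exception frame between the task stack and the per-task system call stack before switching PSP and PSPLIM. The frame is 8 words, or 26 words when bit 4 of EXC_RETURN is clear and an extended (FPU) frame was stacked. A reduced sketch of just that copy, with a hypothetical helper name:

#include <stdint.h>

/* Same mask as defined earlier in the patch. */
#define portEXC_RETURN_STACK_FRAME_TYPE_MASK    ( 1UL << 4UL )

/* Hypothetical helper, not part of the patch: copies the stacked exception
 * frame from pulSrc to pulDst and returns the frame size in words. */
static uint32_t prvCopyStackedFrame( uint32_t * pulDst,
                                     const uint32_t * pulSrc,
                                     uint32_t ulLR )
{
    uint32_t i, ulFrameSize;

    if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
    {
        ulFrameSize = 26UL; /* Extended frame - FP context was stacked. */
    }
    else
    {
        ulFrameSize = 8UL;  /* Standard frame - r0-r3, r12, LR, PC, xPSR. */
    }

    for( i = 0; i < ulFrameSize; i++ )
    {
        pulDst[ i ] = pulSrc[ i ];
    }

    return ulFrameSize;
}

On entry the copy runs from the task stack to the system call stack and privilege is raised; on exit it runs in the opposite direction and the saved LR, PSPLIM and padding state are restored before privilege is dropped.
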
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
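
For reference, the MPU-enabled pxPortInitialiseStack above stores the initial context into xMPUSettings->ulContext in the order sketched by the illustrative enumeration below (TrustZone disabled; with configENABLE_TRUSTZONE an xSecureContext word sits between xPSR and PSP). The enumerator names are not from the patch.

/* Illustrative index names only - the patch writes the words in this order. */
typedef enum
{
    exR4 = 0, exR5, exR6, exR7, exR8, exR9, exR10, exR11, /* Callee-saved registers. */
    exR0, exR1, exR2, exR3, exR12, exLR, exPC, exXPSR,    /* Hardware-saved frame. */
    exPSP,        /* PSP pointing at the hardware-saved frame on the task stack. */
    exPSPLIM,     /* Stack limit for the task. */
    exCONTROL,    /* Privileged or unprivileged CONTROL value. */
    exEXC_RETURN  /* LR value used on exception return. */
} eSavedContextIndexSketch_t;

pxPortInitialiseStack returns the address one past the last stored word, which the context-restore assembly then walks backwards with ldmdb.
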
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s index 581b84d4951..ec52025270b 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portasm.s @@ -32,9 +32,18 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -79,48 +88,79 @@ vResetPrivilege: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. 
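
xPortIsAuthorizedToAccessBuffer added above is the hook the v2 MPU wrappers can use to validate a caller-supplied buffer before a privileged implementation touches it: access is granted only if the calling task is privileged, or if the whole buffer lies inside one of the task's enabled MPU regions with the requested permissions. A hedged caller-side sketch follows; the wrapper name is made up and the sketch assumes a normal FreeRTOS build environment for the headers.

#include "FreeRTOS.h"
#include "task.h"

/* Prototype as added by the patch. */
extern BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
                                                   uint32_t ulBufferLength,
                                                   uint32_t ulAccessRequested );

/* Made-up wrapper name, for illustration only. */
BaseType_t xCopyToUserBufferSketch( void * pvDestBuffer,
                                    uint32_t ulLength )
{
    BaseType_t xReturn = pdFAIL;

    /* Only write to the caller's buffer if the calling task's MPU regions
     * actually grant write access to the whole buffer. */
    if( xPortIsAuthorizedToAccessBuffer( pvDestBuffer, ulLength,
                                         tskMPU_WRITE_PERMISSION ) == pdTRUE )
    {
        /* ... safe to copy data into pvDestBuffer here ... */
        xReturn = pdPASS;
    }

    return xReturn;
}
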
*/ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs_first_task: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - msr psplim, r1 /* Set this task's PSPLIM value. */ - msr control, r2 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r3 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ msr psplim, r1 /* Set this task's PSPLIM value. */ movs r1, #2 /* r1 = 2. */ @@ -131,6 +171,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. 
*/ bx r2 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -169,6 +210,114 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */ + mrs r2, psp /* r2 = PSP. */ + + save_general_regs: + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r1!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */ + sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r1!, {r4-r11} /* Store r4-r11. */ + ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r1!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psplim /* r3 = PSPLIM. */ + mrs r4, control /* r4 = CONTROL. */ + stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. 
*/ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) @@ -176,16 +325,10 @@ PendSV_Handler: it eq vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ -#if ( configENABLE_MPU == 1 ) - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r2, control /* r2 = CONTROL. */ - mov r3, lr /* r3 = LR/EXC_RETURN. */ - stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ -#else /* configENABLE_MPU */ + mrs r2, psplim /* r2 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */ -#endif /* configENABLE_MPU */ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ @@ -203,37 +346,7 @@ PendSV_Handler: ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. 
*/ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ -#else /* configENABLE_MPU */ ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ -#endif /* configENABLE_MPU */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -241,22 +354,53 @@ PendSV_Handler: vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r2 /* Restore the CONTROL register value for the task. */ -#else /* configENABLE_MPU */ msr psplim, r2 /* Restore the PSPLIM register value for the task. */ -#endif /* configENABLE_MPU */ msr psp, r0 /* Remember the new top of stack for the task. */ bx r3 + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq syscall_exit + b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ END diff --git a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM35P_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. 
*/ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM4F_MPU/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM4F_MPU/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..a0541f790ba --- /dev/null +++ b/portable/IAR/ARM_CM4F_MPU/mpu_wrappers_v2_asm.S @@ -0,0 +1,1556 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Including FreeRTOSConfig.h here will cause build errors if the header file + * contains code not understood by the assembler - for example the 'extern' keyword. + * To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so + * the code is included in C files but excluded by the preprocessor in assembly + * files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ +#include "FreeRTOSConfig.h" + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. 
*/ +#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 5 +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + 
push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag 
+MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC 
MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst 
r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + 
MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs 
r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b 
MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
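[Editor's note, not part of the patch: every MPU_* wrapper above follows the same dispatch pattern - push r0 to preserve the first argument, read the CONTROL register, and test bit 0 (nPRIV). A privileged caller branches straight to the matching MPU_*Impl function; an unprivileged caller raises SVC #portSVC_SYSTEM_CALL_ENTER (or #portSVC_SYSTEM_CALL_ENTER_1 for the five-parameter calls), runs the Impl on the privileged system call stack, and returns through SVC #portSVC_SYSTEM_CALL_EXIT. The C sketch below only restates that control flow for readability; the prv* helper names are hypothetical placeholders, not FreeRTOS APIs.]

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the assembly pieces, used only to make the
 * sketch self-contained and compilable on a host. */
static uint32_t prvReadControl( void )           { return 1UL; } /* Pretend the caller is unprivileged. */
static long prvCallImplDirect( void )            { return 0L;  } /* Stands in for "b MPU_x...Impl". */
static long prvCallImplThroughSystemCall( void ) { return 0L;  } /* Stands in for SVC enter -> Impl -> SVC exit. */

/* Dispatch logic mirrored from each MPU_* wrapper above. */
static long lMpuWrapperDispatch( void )
{
    if( ( prvReadControl() & 1UL ) != 0UL )
    {
        /* CONTROL bit 0 set: the caller runs unprivileged, so the call must
         * go through the SVC based system call mechanism. */
        return prvCallImplThroughSystemCall();
    }
    else
    {
        /* Caller is already privileged: branch to the Impl directly. */
        return prvCallImplDirect();
    }
}

int main( void )
{
    printf( "dispatch result: %ld\n", lMpuWrapperDispatch() );
    return 0;
}

[The push/pop of r0 around the CONTROL read in the assembly exists so the caller's first argument still reaches the Impl or the SVC path untouched; MPU_xTimerGenericCommand additionally checks IPSR so that calls made from an ISR always take the privileged path.]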
+/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK 
MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl 
+ + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl +MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM4F_MPU/port.c b/portable/IAR/ARM_CM4F_MPU/port.c index 2d70b339a3a..27f6f0d6c9a 100755 --- a/portable/IAR/ARM_CM4F_MPU/port.c +++ b/portable/IAR/ARM_CM4F_MPU/port.c @@ -132,8 +132,14 @@ #define portINITIAL_CONTROL_IF_UNPRIVILEGED ( 0x03 ) #define portINITIAL_CONTROL_IF_PRIVILEGED ( 0x02 ) +/* Constants used during system call enter and exit. */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) + /* Offsets in the stack to the parameters when inside the SVC handler. */ +#define portOFFSET_TO_LR ( 5 ) #define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) /* The systick is a 24-bit counter. */ #define portMAX_24_BIT_NUMBER ( 0xffffffUL ) @@ -147,6 +153,21 @@ * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) +/* Does addr lie within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) +/*-----------------------------------------------------------*/ + /* * Configure a number of standard MPU regions that are used by all tasks. */ @@ -184,7 +205,7 @@ extern void vPortEnableVFP( void ); /* * The C portion of the SVC handler. */ -void vPortSVCHandler_C( uint32_t * pulParam ); +void vPortSVCHandler_C( uint32_t * pulParam ) PRIVILEGED_FUNCTION; /* * Called from the SVC handler used to start the scheduler. @@ -208,6 +229,57 @@ extern void vPortRestoreContextOfFirstTask( void ) PRIVILEGED_FUNCTION; #else void vPortExitCritical( void ) PRIVILEGED_FUNCTION; #endif + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. 
+ * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ +BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + /*-----------------------------------------------------------*/ /* Each task maintains its own interrupt status in the critical nesting @@ -233,46 +305,56 @@ static UBaseType_t uxCriticalNesting = 0xaaaaaaaa; StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, TaskFunction_t pxCode, void * pvParameters, - BaseType_t xRunPrivileged ) + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - - /* Offset added to account for the way the MCU uses the stack on entry/exit - * of interrupts, and to ensure alignment. */ - pxTopOfStack--; - - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0; /* LR */ - - /* Save code space by skipping register initialisation. */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - - /* A save method is being used that requires each task to maintain its - * own exec return value. */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; - - pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. */ - if( xRunPrivileged == pdTRUE ) { - *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED; + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED; } else { - *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED; + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED; + } + xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */ + xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */ + xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */ + xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */ + xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. */ + xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */ + xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */ + xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */ + xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ + + xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */ + xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */ + xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. 
*/ + xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */ + xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */ + xMPUSettings->ulContext[ 16 ] = 0; /* LR. */ + xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */ + xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */ + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. */ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; } + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ - return pxTopOfStack; + return &( xMPUSettings->ulContext[ 19 ] ); } /*-----------------------------------------------------------*/ -void vPortSVCHandler_C( uint32_t * pulParam ) +void vPortSVCHandler_C( uint32_t * pulParam ) /* PRIVILEGED_FUNCTION */ { uint8_t ucSVCNumber; uint32_t ulPC; @@ -334,7 +416,7 @@ void vPortSVCHandler_C( uint32_t * pulParam ) ::: "r1", "memory" ); break; - #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ + #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ default: /* Unknown SVC call. */ break; @@ -342,6 +424,308 @@ void vPortSVCHandler_C( uint32_t * pulParam ) } /*-----------------------------------------------------------*/ +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
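 *
 * For reference, the two sizes used here come straight from the Cortex-M
 * exception frame: the basic frame is 8 words ( r0-r3, r12, lr, pc, xPSR ),
 * and the extended frame adds s0-s15, FPSCR and a reserved word, giving
 * 8 + 16 + 1 + 1 = 26 words. Bit 4 of EXC_RETURN
 * ( portEXC_RETURN_STACK_FRAME_TYPE_MASK ) is clear when the extended
 * frame was stacked.
 *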
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, #1 \n" /* Clear nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. */ + ::: "r1", "memory" + ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. 
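 *
 * A minimal sketch of where that stack-passed argument lives, using the
 * illustrative names ulOffset and ulFifthArgument rather than anything
 * defined in this file:
 *
 * @code{c}
 * // Bit 9 of the stacked xPSR is set when the hardware pushed an aligner
 * // word, so the argument sits one slot further up the task stack.
 * ulOffset = ( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) != 0UL ) ? 1UL : 0UL;
 * ulFifthArgument = pulTaskStack[ ulStackFrameSize + ulOffset ];
 * @endcode
 *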
We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " bic r1, #1 \n" /* Clear nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. */ + ::: "r1", "memory" + ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. 
*/ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r1, control \n" /* Obtain current control value. */ + " orr r1, #1 \n" /* Set nPRIV bit. */ + " msr control, r1 \n" /* Write back new control value. */ + ::: "r1", "memory" + ); + + /* Restore the stacked link register to what it was at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} +/*-----------------------------------------------------------*/ + /* * See header file for description. */ @@ -738,11 +1122,19 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); + /* Invalidate user configurable regions. 
*/ for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } } else @@ -765,6 +1157,13 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | ( portMPU_REGION_ENABLE ); + + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) + + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); + } lIndex = 0; @@ -785,12 +1184,28 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) | ( xRegions[ lIndex ].ulParameters ) | ( portMPU_REGION_ENABLE ); + + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t) xRegions[ lIndex ].pvBaseAddress; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL ); + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; + if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) || + ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION; + } + if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } } else { /* Invalidate the region. */ xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } lIndex++; @@ -799,6 +1214,48 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } /*-----------------------------------------------------------*/ +BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + +{ + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. 
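 *
 * A minimal usage sketch, with pvUserBuffer and ulLength standing in for
 * values handed to a kernel wrapper by an unprivileged caller ( both names
 * are illustrative only ):
 *
 * @code{c}
 * if( xPortIsAuthorizedToAccessBuffer( pvUserBuffer,
 *                                      ulLength,
 *                                      tskMPU_WRITE_PERMISSION ) == pdTRUE )
 * {
 *     // Every byte of the buffer lies inside a region that grants the
 *     // calling task write access, and the end-address arithmetic did
 *     // not wrap around 0xFFFFFFFF.
 * }
 * @endcode
 *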
*/ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + + return xAccessGranted; +} +/*-----------------------------------------------------------*/ + + #if ( configASSERT_DEFINED == 1 ) void vPortValidateInterruptPriority( void ) diff --git a/portable/IAR/ARM_CM4F_MPU/portasm.s b/portable/IAR/ARM_CM4F_MPU/portasm.s index db751f6e5ef..3cbe5e0f5ed 100644 --- a/portable/IAR/ARM_CM4F_MPU/portasm.s +++ b/portable/IAR/ARM_CM4F_MPU/portasm.s @@ -25,6 +25,7 @@ * https://github.com/FreeRTOS * */ + /* Including FreeRTOSConfig.h here will cause build errors if the header file contains code not understood by the assembler - for example the 'extern' keyword. To avoid errors place any such code inside a #ifdef __ICCARM__/#endif block so @@ -38,6 +39,9 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit PUBLIC xPortPendSVHandler PUBLIC vPortSVCHandler @@ -49,99 +53,141 @@ files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. /*-----------------------------------------------------------*/ +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 3 +#define portSVC_SYSTEM_CALL_ENTER_1 4 +#define portSVC_SYSTEM_CALL_EXIT 5 +/*-----------------------------------------------------------*/ + xPortPendSVHandler: - mrs r0, psp - isb - /* Get the location of the current TCB. */ + ldr r3, =pxCurrentTCB - ldr r2, [r3] + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + ldr r1, [r2] /* r1 = Location where the context should be saved. */ - /* Is the task using the FPU context? If so, push high vfp registers. */ - tst r14, #0x10 - it eq - vstmdbeq r0!, {s16-s31} + /*------------ Save Context. ----------- */ + mrs r3, control + mrs r0, psp + isb - /* Save the core registers. */ - mrs r1, control - stmdb r0!, {r1, r4-r11, r14} + add r0, r0, #0x20 /* Move r0 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r1!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r0, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */ + sub r0, r0, #0x20 /* Set r0 back to the location of hardware saved context. */ - /* Save the new top of stack into the first member of the TCB. */ - str r0, [r2] + stmia r1!, {r3-r11, lr} /* Store CONTROL register, r4-r11 and LR. */ + ldmia r0, {r4-r11} /* Copy hardware saved context into r4-r11. 
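 *
 * Taken together, the stores in this save path account for at most
 * 16 ( s16-s31 ) + 17 ( s0-s15 plus FPSCR ) + 10 ( CONTROL, r4-r11,
 * EXC_RETURN ) + 9 ( PSP plus the 8-word hardware frame ) = 52 words,
 * which is where the MAX_CONTEXT_SIZE value of 52 in portmacro.h
 * comes from.
 *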
*/ + stmia r1!, {r0, r4-r11} /* Store original PSP (after hardware has saved context) and the hardware saved context. */ + str r1, [r2] /* Save the location from where the context should be restored as the first member of TCB. */ - stmdb sp!, {r0, r3} + /*---------- Select next task. --------- */ mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY - #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) - cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ - #endif +#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ +#endif msr basepri, r0 dsb isb - #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) - cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ - #endif +#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ +#endif bl vTaskSwitchContext mov r0, #0 msr basepri, r0 - ldmia sp!, {r0, r3} - - /* The first item in pxCurrentTCB is the task top of stack. */ - ldr r1, [r3] - ldr r0, [r1] - /* Move onto the second item in the TCB... */ - add r1, r1, #4 - - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - bic r3, r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - str r3, [r2] /* Disable MPU. */ - - /* Region Base Address register. */ - ldr r2, =0xe000ed9c - /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ - ldmia r1!, {r4-r11} - /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ - stmia r2, {r4-r11} - - #ifdef configTOTAL_MPU_REGIONS - #if ( configTOTAL_MPU_REGIONS == 16 ) - /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ - ldmia r1!, {r4-r11} - /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ - stmia r2, {r4-r11} - /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ - ldmia r1!, {r4-r11} - /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ - stmia r2, {r4-r11} - #endif /* configTOTAL_MPU_REGIONS == 16. */ - #endif /* configTOTAL_MPU_REGIONS */ - - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - str r3, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - - /* Pop the registers that are not automatically saved on exception entry. */ - ldmia r0!, {r3-r11, r14} - msr control, r3 - /* Is the task using the FPU context? If so, pop the high vfp registers - too. */ - tst r14, #0x10 - it eq - vldmiaeq r0!, {s16-s31} + /*------------ Program MPU. ------------ */ + ldr r3, =pxCurrentTCB + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + bic r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + str r3, [r0] /* Disable MPU. */ + + ldr r0, =0xe000ed9c /* Region Base Address register. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ + +#ifdef configTOTAL_MPU_REGIONS + #if ( configTOTAL_MPU_REGIONS == 16 ) + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. 
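 *
 * Writing eight words to 0xe000ed9c programs four regions at a time
 * because that address is MPU_RBAR, which is followed by MPU_RASR and
 * the three RBAR/RASR alias pairs defined by the ARMv7-M MPU.
 *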
*/ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ + #endif /* configTOTAL_MPU_REGIONS == 16. */ +#endif + + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + orr r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + str r3, [r0] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + /*---------- Restore Context. ---------- */ + ldr r3, =pxCurrentTCB + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + ldr r1, [r2] /* r1 = Location of saved context in TCB. */ + ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ msr psp, r0 - isb + stmia r0!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */ + msr control, r3 - bx r14 + tst lr, #0x10 + ittt eq + vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r0!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */ + str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr /*-----------------------------------------------------------*/ +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +vPortSVCHandler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #portSVC_SYSTEM_CALL_ENTER + beq syscall_enter + cmp r2, #portSVC_SYSTEM_CALL_ENTER_1 + beq syscall_enter_1 + cmp r2, #portSVC_SYSTEM_CALL_EXIT + beq syscall_exit + b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + vPortSVCHandler: - #ifndef USE_PROCESS_STACK /* Code should not be required if a main() is using the process stack. */ + #ifndef USE_PROCESS_STACK tst lr, #4 ite eq mrseq r0, msp @@ -151,6 +197,7 @@ vPortSVCHandler: #endif b vPortSVCHandler_C +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortStartFirstTask: @@ -176,60 +223,56 @@ vPortStartFirstTask: /*-----------------------------------------------------------*/ vPortRestoreContextOfFirstTask: - /* Use the NVIC offset register to locate the stack. */ - ldr r0, =0xE000ED08 + ldr r0, =0xE000ED08 /* Use the NVIC offset register to locate the stack. */ ldr r0, [r0] ldr r0, [r0] - /* Set the msp back to the start of the stack. */ - msr msp, r0 - /* Restore the context. */ + msr msp, r0 /* Set the msp back to the start of the stack. */ + + /*------------ Program MPU. ------------ */ ldr r3, =pxCurrentTCB - ldr r1, [r3] - /* The first item in the TCB is the task top of stack. */ - ldr r0, [r1] - /* Move onto the second item in the TCB... */ - add r1, r1, #4 - - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - bic r3, r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - str r3, [r2] /* Disable MPU. */ - - /* Region Base Address register. */ - ldr r2, =0xe000ed9c - /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ - ldmia r1!, {r4-r11} - /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. 
*/ - stmia r2, {r4-r11} - - #ifdef configTOTAL_MPU_REGIONS - #if ( configTOTAL_MPU_REGIONS == 16 ) - /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ - ldmia r1!, {r4-r11} - /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ - stmia r2, {r4-r11} - /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ - ldmia r1!, {r4-r11} - /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ - stmia r2, {r4-r11} - #endif /* configTOTAL_MPU_REGIONS == 16. */ - #endif /* configTOTAL_MPU_REGIONS */ - - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [r2] /* Read the value of MPU_CTRL. */ - orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - str r3, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - - /* Pop the registers that are not automatically saved on exception entry. */ - ldmia r0!, {r3-r11, r14} - msr control, r3 - /* Restore the task stack pointer. */ + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + bic r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + str r3, [r0] /* Disable MPU. */ + + ldr r0, =0xe000ed9c /* Region Base Address register. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ + +#ifdef configTOTAL_MPU_REGIONS + #if ( configTOTAL_MPU_REGIONS == 16 ) + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ + #endif /* configTOTAL_MPU_REGIONS == 16. */ +#endif + + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + orr r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + str r3, [r0] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + /*---------- Restore Context. ---------- */ + ldr r3, =pxCurrentTCB + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + ldr r1, [r2] /* r1 = Location of saved context in TCB. */ + + ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ msr psp, r0 + stmia r0, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */ + msr control, r3 + str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. 
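 *
 * The words unwound above follow the layout pxPortInitialiseStack builds
 * in ulContext[]: [ 0 ] CONTROL, [ 1..8 ] r4-r11, [ 9 ] EXC_RETURN,
 * [ 10 ] PSP, [ 11..18 ] the hardware frame ( r0-r3, r12, LR, PC, xPSR ).
 * A task that has never run has no FP words in its record, which is why
 * no vldmdb is needed on this path.
 *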
*/ + mov r0, #0 msr basepri, r0 - bx r14 + bx lr /*-----------------------------------------------------------*/ diff --git a/portable/IAR/ARM_CM4F_MPU/portmacro.h b/portable/IAR/ARM_CM4F_MPU/portmacro.h index 96787e7c31f..4bb8abcdecb 100644 --- a/portable/IAR/ARM_CM4F_MPU/portmacro.h +++ b/portable/IAR/ARM_CM4F_MPU/portmacro.h @@ -195,9 +195,45 @@ typedef struct MPU_REGION_REGISTERS uint32_t ulRegionAttribute; } xMPU_REGION_REGISTERS; +typedef struct MPU_REGION_SETTINGS +{ + uint32_t ulRegionStartAddress; + uint32_t ulRegionEndAddress; + uint32_t ulRegionPermissions; +} xMPU_REGION_SETTINGS; + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + +#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + +#define MAX_CONTEXT_SIZE 52 + +/* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ +#define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) +#define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + typedef struct MPU_SETTINGS { xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ]; + xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ]; + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif } xMPU_SETTINGS; /* Architecture specifics. */ @@ -207,9 +243,12 @@ typedef struct MPU_SETTINGS /*-----------------------------------------------------------*/ /* SVC numbers for various services. */ -#define portSVC_START_SCHEDULER 0 -#define portSVC_YIELD 1 -#define portSVC_RAISE_PRIVILEGE 2 +#define portSVC_START_SCHEDULER 0 +#define portSVC_YIELD 1 +#define portSVC_RAISE_PRIVILEGE 2 +#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 5 /* Scheduler utilities. */ @@ -348,6 +387,16 @@ extern void vResetPrivilege( void ); #define portRESET_PRIVILEGE() vResetPrivilege() /*-----------------------------------------------------------*/ +extern BaseType_t xPortIsTaskPrivileged( void ); + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ +#define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() +/*-----------------------------------------------------------*/ + #ifndef configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY #warning "configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY is not defined. We recommend defining it to 1 in FreeRTOSConfig.h for better security. https://www.FreeRTOS.org/FreeRTOS-V10.3.x.html" #define configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY 0 diff --git a/portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM55/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + 
MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
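/* Every wrapper in this file follows the same shape: save r0, read CONTROL
 * and test the nPRIV bit ( bit 0 ), then restore r0 before dispatching.
 * A privileged caller falls straight through to the corresponding
 * MPU_...Impl routine; an unprivileged caller enters the kernel with
 * SVC #portSVC_SYSTEM_CALL_ENTER ( or #portSVC_SYSTEM_CALL_ENTER_1 for
 * APIs that pass a fifth parameter on the stack, such as
 * MPU_xTaskGenericNotify ) and returns through
 * SVC #portSVC_SYSTEM_CALL_EXIT. */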
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC 
MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc 
#portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop 
{r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
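
The shims above and below all follow one template, so a C-level sketch of that control flow may make the remaining wrappers easier to skim. The sketch is not part of the patch: read_control_register(), svc_system_call_enter() and svc_system_call_exit() are hypothetical stand-ins for the `mrs r0, control` read and the `svc #portSVC_SYSTEM_CALL_ENTER` / `svc #portSVC_SYSTEM_CALL_EXIT` instructions, the queue API prototype is simplified, and the push/pop of r0 that preserves the first argument around the CONTROL read is omitted.

    /*
     * C-level sketch of the flow each MPU_<api> shim implements; the extern
     * helpers are hypothetical stand-ins for single instructions in portasm.s.
     */
    #include <stdint.h>

    typedef long BaseType_t;                          /* As defined for 32-bit ARM ports. */

    #define CONTROL_NPRIV_MASK    ( 1UL << 0 )        /* CONTROL bit 0: 0 = privileged thread mode. */

    extern uint32_t read_control_register( void );    /* Stand-in for "mrs r0, control". */
    extern void svc_system_call_enter( void );        /* Stand-in for "svc #portSVC_SYSTEM_CALL_ENTER". */
    extern void svc_system_call_exit( void );         /* Stand-in for "svc #portSVC_SYSTEM_CALL_EXIT". */
    extern BaseType_t MPU_xQueueReceiveImpl( void * xQueue,
                                             void * pvBuffer,
                                             uint32_t xTicksToWait );

    BaseType_t MPU_xQueueReceive_sketch( void * xQueue,
                                         void * pvBuffer,
                                         uint32_t xTicksToWait )
    {
        BaseType_t xReturn;

        if( ( read_control_register() & CONTROL_NPRIV_MASK ) == 0UL )
        {
            /* Privileged caller: branch straight to the implementation,
             * the "b MPU_<api>Impl" path in the assembly. */
            xReturn = MPU_xQueueReceiveImpl( xQueue, pvBuffer, xTicksToWait );
        }
        else
        {
            /* Unprivileged caller: the first SVC switches to the per-task
             * system call stack and raises privilege, the implementation runs,
             * and the second SVC restores the task stack and drops privilege. */
            svc_system_call_enter();
            xReturn = MPU_xQueueReceiveImpl( xQueue, pvBuffer, xTicksToWait );
            svc_system_call_exit();
        }

        return xReturn;
    }

Checking CONTROL first means privileged callers never pay the SVC round trip; in the real shims, the push/pop of r0 around the CONTROL read is what keeps the first argument intact for the call into MPU_<api>Impl.
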
+/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. 
*/ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + 
MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + 
pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers because of config options. 
*/ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl +MPU_xQueueGenericSendImpl: + b 
MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM55/non_secure/port.c b/portable/IAR/ARM_CM55/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM55/non_secure/port.c +++ b/portable/IAR/ARM_CM55/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM55/non_secure/portasm.s b/portable/IAR/ARM_CM55/non_secure/portasm.s index a193cd7b80e..15e74ffc16b 100644 --- a/portable/IAR/ARM_CM55/non_secure/portasm.s +++ b/portable/IAR/ARM_CM55/non_secure/portasm.s @@ -32,12 +32,21 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN xSecureContext EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C EXTERN SecureContext_SaveContext EXTERN SecureContext_LoadContext +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -89,50 +98,81 @@ vPortAllocateSecureContext: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. 
*/ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + + restore_general_regs_first_task: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r3, [r2] /* Read pxCurrentTCB. */ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - ldr r5, =xSecureContext - str r1, [r5] /* Set xSecureContext to this task's value for the same. */ - msr psplim, r2 /* Set this task's PSPLIM value. */ - msr control, r3 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. 
*/ - bx r4 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ ldr r4, =xSecureContext str r1, [r4] /* Set xSecureContext to this task's value for the same. */ @@ -145,6 +185,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r3 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -183,6 +224,143 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */ + + cbz r0, save_ns_context /* No secure context to save. */ + save_s_context: + push {r0-r2, lr} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r2, lr} + + save_ns_context: + mov r3, lr /* r3 = LR (EXC_RETURN). */ + lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + save_general_regs: + mrs r3, psp + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r2!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */ + sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r2!, {r4-r11} /* Store r4-r11. */ + ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r2!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psp /* r3 = PSP. */ + mrs r4, psplim /* r4 = PSPLIM. */ + mrs r5, control /* r5 = CONTROL. */ + stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. 
r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + cbz r0, restore_ns_context /* No secure context to restore. */ + + restore_s_context: + push {r1-r3, lr} + bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r1-r3, lr} + + restore_ns_context: + mov r0, lr /* r0 = LR (EXC_RETURN). */ + lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + restore_general_regs: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. 
*/ @@ -200,20 +378,11 @@ PendSV_Handler: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ -#else /* configENABLE_MPU */ subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ -#endif /* configENABLE_MPU */ b select_next_task save_ns_context: @@ -224,17 +393,6 @@ PendSV_Handler: it eq vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - adds r2, r2, #16 /* r2 = r2 + 16. */ - stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - subs r2, r2, #16 /* r2 = r2 - 16. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ adds r2, r2, #12 /* r2 = r2 + 12. */ @@ -243,7 +401,6 @@ PendSV_Handler: mov r3, lr /* r3 = LR/EXC_RETURN. */ subs r2, r2, #12 /* r2 = r2 - 12. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ select_next_task: mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY @@ -258,51 +415,6 @@ PendSV_Handler: ldr r1, [r3] /* Read pxCurrentTCB. */ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ - #if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r3] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r3] /* Program MAIR0. */ - ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r3] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. 
*/ - str r4, [r3] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - - #if ( configENABLE_MPU == 1 ) - ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r3 /* Restore the CONTROL register value for the task. */ - mov lr, r4 /* LR = r4. */ - ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ - str r0, [r3] /* Restore the task's xSecureContext. */ - cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ - ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - ldr r1, [r3] /* Read pxCurrentTCB. */ - push {r2, r4} - bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - pop {r2, r4} - mov lr, r4 /* LR = r4. */ - lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - msr psp, r2 /* Remember the new top of stack for the task. */ - bx lr - #else /* configENABLE_MPU */ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ msr psplim, r1 /* Restore the PSPLIM register value for the task. */ mov lr, r4 /* LR = r4. */ @@ -319,7 +431,6 @@ PendSV_Handler: bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr - #endif /* configENABLE_MPU */ restore_ns_context: ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ @@ -330,14 +441,50 @@ PendSV_Handler: #endif /* configENABLE_FPU || configENABLE_MVE */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. 
*/ + beq syscall_exit b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortFreeSecureContext: diff --git a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM55/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b 
MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + 
MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + 
MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b 
MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl 
+ svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b 
MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers 
because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl 
+MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
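Aside: every MPU_* entry point in the assembly above follows the same trampoline shape. Below is a rough, host-runnable C model of that control flow, with hypothetical helper names (ulReadControl, vSystemCallEnterRequest, vSystemCallExitRequest, lTimerGetPeriodImpl) standing in for the real instructions; the actual wrappers have to stay in assembly so the argument registers reach the *Impl function untouched.

/* Illustrative sketch only - not part of the patch. Models the branch that
 * every MPU_* trampoline performs on CONTROL.nPRIV. */
#include <stdint.h>
#include <stdio.h>

static uint32_t ulSimulatedControl = 1UL;   /* Bit 0 (nPRIV) set => unprivileged caller. */

static uint32_t ulReadControl( void ) { return ulSimulatedControl; }                           /* Stands in for 'mrs r0, control'. */
static void vSystemCallEnterRequest( void ) { printf( "svc #portSVC_SYSTEM_CALL_ENTER\n" ); }  /* Stands in for the enter SVC. */
static void vSystemCallExitRequest( void )  { printf( "svc #portSVC_SYSTEM_CALL_EXIT\n" ); }   /* Stands in for the exit SVC. */
static long lTimerGetPeriodImpl( void )     { return 100L; }                                   /* Stands in for an MPU_*Impl function. */

static long lTimerGetPeriodTrampoline( void )
{
    long lResult;

    if( ( ulReadControl() & 1UL ) == 0UL )
    {
        /* Privileged caller: branch straight to the implementation. */
        lResult = lTimerGetPeriodImpl();
    }
    else
    {
        /* Unprivileged caller: enter the system call (switch to the system
         * call stack and raise privilege), run the implementation, then
         * exit (restore the task stack and drop privilege). */
        vSystemCallEnterRequest();
        lResult = lTimerGetPeriodImpl();
        vSystemCallExitRequest();
    }

    return lResult;
}

int main( void )
{
    printf( "period = %ld\n", lTimerGetPeriodTrampoline() );
    return 0;
}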
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
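Aside: a worked example (not part of the patch) of how the new helper macros resolve for a hypothetical 64 KB region at 0x20000000. The macro bodies are copied from the hunk above so the arithmetic can be checked on a host build.

#include <assert.h>
#include <stdint.h>

#define portMPU_RBAR_ADDRESS_MASK                         ( 0xffffffe0 )
#define portMPU_RLAR_ADDRESS_MASK                         ( 0xffffffe0 )
#define portUINT32_MAX                                    ( ~( ( uint32_t ) 0 ) )
#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar )       ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar )        ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end )   ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
#define portADD_UINT32_WILL_OVERFLOW( a, b )              ( ( a ) > ( portUINT32_MAX - ( b ) ) )

int main( void )
{
    /* RBAR keeps SH/AP/XN in bits[4:0]; masking them off yields the region start. */
    assert( portEXTRACT_FIRST_ADDRESS_FROM_RBAR( 0x20000003UL ) == 0x20000000UL );

    /* RLAR keeps control bits in bits[4:0]; the last covered byte is the limit ORed with 0x1F. */
    assert( portEXTRACT_LAST_ADDRESS_FROM_RLAR( 0x2000ffe1UL ) == 0x2000ffffUL );

    /* A buffer address inside [0x20000000, 0x2000ffff] passes the range check. */
    assert( portIS_ADDRESS_WITHIN_RANGE( 0x20008000UL, 0x20000000UL, 0x2000ffffUL ) );

    /* The overflow guard rejects a buffer whose end address would wrap past 0xffffffff. */
    assert( portADD_UINT32_WILL_OVERFLOW( 0xfffffff0UL, 0x20UL ) );
    assert( !portADD_UINT32_WILL_OVERFLOW( 0xfffffff0UL, 0x0fUL ) );

    return 0;
}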
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
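Aside: a small host-side check (not part of the patch) of how vPortSVCHandler_C above recovers the SVC number. A Thumb 'svc #n' encodes as 0xDFnn, and the stacked PC points just past the instruction, so the immediate byte sits at PC - 2.

#include <assert.h>
#include <stdint.h>

int main( void )
{
    /* Pretend this is code memory: 'svc #4' (0xDF04) followed by a nop, in
     * little-endian byte order. */
    uint8_t ucCode[] = { 0x04, 0xDF, 0x00, 0xBF };

    /* The hardware stacks the address of the instruction after the SVC as PC. */
    uint8_t * pucStackedPC = &( ucCode[ 2 ] );

    /* Same indexing as the handler: ( ( uint8_t * ) ulPC )[ -2 ]. */
    uint8_t ucSVCNumber = pucStackedPC[ -2 ];

    assert( ucSVCNumber == 4 );   /* portSVC_SYSTEM_CALL_ENTER in this patch. */

    return 0;
}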
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
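Aside: the frame-size decision made in vSystemCallEnter (and repeated in vSystemCallEnter_1 and vSystemCallExit) boils down to bit 4 of EXC_RETURN. A minimal sketch, not part of the patch:

#include <assert.h>
#include <stdint.h>

#define portEXC_RETURN_STACK_FRAME_TYPE_MASK    ( 1UL << 4UL )

static uint32_t ulStackFrameSizeInWords( uint32_t ulLR )
{
    /* 8 words: r0-r3, r12, LR, PC, xPSR. 26 words: the same plus s0-s15,
     * FPSCR and a reserved word when an extended (FPU) frame was stacked. */
    return ( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) ? 26UL : 8UL;
}

int main( void )
{
    assert( ulStackFrameSizeInWords( 0xFFFFFFFDUL ) == 8UL );    /* Bit 4 set: standard 8-word frame. */
    assert( ulStackFrameSizeInWords( 0xFFFFFFEDUL ) == 26UL );   /* Bit 4 clear: extended 26-word frame. */
    return 0;
}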
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
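Aside: a worked example (not part of the patch) of the pointer rounding applied to the system call stack above, assuming the usual 8-byte portBYTE_ALIGNMENT for this port family. The stack top is rounded down and the stack limit rounded up so that both PSP and PSPLIM end up double word aligned.

#include <assert.h>
#include <stdint.h>

#define portBYTE_ALIGNMENT         8
#define portBYTE_ALIGNMENT_MASK    ( 0x0007 )

int main( void )
{
    uint32_t ulTop   = 0x2000A01CUL;   /* Hypothetical address of the last word of the buffer. */
    uint32_t ulLimit = 0x2000A001UL;   /* Hypothetical address of the first byte of the buffer. */

    /* Round the top of the system call stack down to an 8-byte boundary... */
    assert( ( ulTop & ~( ( uint32_t ) portBYTE_ALIGNMENT_MASK ) ) == 0x2000A018UL );

    /* ...and round the limit up, matching the two adjustments made above. */
    assert( ( ( ulLimit + ( portBYTE_ALIGNMENT - 1 ) ) & ~( ( uint32_t ) portBYTE_ALIGNMENT_MASK ) ) == 0x2000A008UL );

    return 0;
}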
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s index 581b84d4951..ec52025270b 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portasm.s @@ -32,9 +32,18 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -79,48 +88,79 @@ vResetPrivilege: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. 
*/ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs_first_task: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - msr psplim, r1 /* Set this task's PSPLIM value. */ - msr control, r2 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r3 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ msr psplim, r1 /* Set this task's PSPLIM value. */ movs r1, #2 /* r1 = 2. */ @@ -131,6 +171,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. 
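Aside: a rough C rendering (not part of the patch; the function name is hypothetical and GCC-style inline barriers are assumed) of the program_mpu_first_task / program_mpu sequence above: disable the MPU, program MAIR0, select region 4 in MPU_RNR, then write four RBAR/RLAR pairs in one go through MPU_RBAR and its alias registers, and re-enable the MPU.

#include <stdint.h>

#define MPU_CTRL     ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
#define MPU_RNR      ( *( ( volatile uint32_t * ) 0xe000ed98 ) )
#define MPU_RBAR     ( ( volatile uint32_t * ) 0xe000ed9c )    /* RBAR, RLAR, RBAR_A1, RLAR_A1, ... are consecutive. */
#define MPU_MAIR0    ( *( ( volatile uint32_t * ) 0xe000edc0 ) )

typedef struct { uint32_t ulRBAR, ulRLAR; } RegionPair_t;      /* Mirrors MPURegionSettings_t. */

static void prvProgramTaskRegions( uint32_t ulMAIR0, const RegionPair_t xRegions[ 4 ] )
{
    uint32_t i;

    __asm volatile ( "dmb" ::: "memory" );   /* Complete outstanding transfers before disabling the MPU. */
    MPU_CTRL &= ~1UL;                        /* Disable the MPU. */
    MPU_MAIR0 = ulMAIR0;                     /* Memory attributes for the task regions. */

    MPU_RNR = 4UL;                           /* The per-task regions start at region 4. */

    for( i = 0; i < 4UL; i++ )
    {
        MPU_RBAR[ 2UL * i ] = xRegions[ i ].ulRBAR;              /* RBAR / RBAR_A1..A3. */
        MPU_RBAR[ ( 2UL * i ) + 1UL ] = xRegions[ i ].ulRLAR;    /* RLAR / RLAR_A1..A3. */
    }

    MPU_CTRL |= 1UL;                         /* Re-enable the MPU. */
    __asm volatile ( "dsb" ::: "memory" );   /* Force memory writes before continuing. */
}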
*/ bx r2 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -169,6 +210,114 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */ + mrs r2, psp /* r2 = PSP. */ + + save_general_regs: + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r1!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */ + sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r1!, {r4-r11} /* Store r4-r11. */ + ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r1!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psplim /* r3 = PSPLIM. */ + mrs r4, control /* r4 = CONTROL. */ + stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. 
*/ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) @@ -176,16 +325,10 @@ PendSV_Handler: it eq vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ -#if ( configENABLE_MPU == 1 ) - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r2, control /* r2 = CONTROL. */ - mov r3, lr /* r3 = LR/EXC_RETURN. */ - stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ -#else /* configENABLE_MPU */ + mrs r2, psplim /* r2 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */ -#endif /* configENABLE_MPU */ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ @@ -203,37 +346,7 @@ PendSV_Handler: ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. 
*/ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ -#else /* configENABLE_MPU */ ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ -#endif /* configENABLE_MPU */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -241,22 +354,53 @@ PendSV_Handler: vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r2 /* Restore the CONTROL register value for the task. */ -#else /* configENABLE_MPU */ msr psplim, r2 /* Restore the PSPLIM register value for the task. */ -#endif /* configENABLE_MPU */ msr psp, r0 /* Remember the new top of stack for the task. */ bx r3 + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq syscall_exit + b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ END diff --git a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM55_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. 
*/ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
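Aside: a quick compile-time check (not part of the patch, C11 for _Static_assert) that the MAX_CONTEXT_SIZE values above match the column widths in the layout diagrams.

_Static_assert( 16 + 16 + 8 + 8 + 5 + 1 == 54, "FPU/MVE with TrustZone" );
_Static_assert( 16 + 16 + 8 + 8 + 4 + 1 == 53, "FPU/MVE without TrustZone" );
_Static_assert( 8 + 8 + 5 + 1 == 22, "No FPU with TrustZone" );
_Static_assert( 8 + 8 + 4 + 1 == 21, "No FPU without TrustZone" );

int main( void ) { return 0; }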
*/ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM85/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b 
MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + 
MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + 
MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b 
MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl 
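
The wrappers above and below all repeat a single dispatch pattern: read CONTROL, test the nPRIV bit, branch straight to the kernel-side Impl routine when the caller is already privileged, and otherwise bracket the call with svc #portSVC_SYSTEM_CALL_ENTER and svc #portSVC_SYSTEM_CALL_EXIT so that the port can raise privilege and run the call on the task's dedicated system call stack (MPU_xTimerGenericCommand additionally tests IPSR so that calls made from an ISR always take the privileged path). The following C sketch is an editorial illustration of that control flow only, not code from this patch; the real wrappers must stay in assembly because the SVC and the tail branch must not disturb the argument registers, and MPU_vTimerSetTimerIDImpl merely stands in for whichever Impl symbol a given wrapper targets.

#include <stdint.h>

#define portSVC_SYSTEM_CALL_ENTER    4
#define portSVC_SYSTEM_CALL_EXIT     6

extern void MPU_vTimerSetTimerIDImpl( void * xTimer, void * pvNewID );

/* Conceptual model of one MPU_* wrapper - illustration only. */
void MPU_vTimerSetTimerID_Model( void * xTimer, void * pvNewID )
{
    uint32_t ulControl;

    /* CONTROL bit 0 (nPRIV) is 0 when the caller is privileged. */
    __asm volatile ( "mrs %0, control" : "=r" ( ulControl ) );

    if( ( ulControl & 1UL ) == 0UL )
    {
        /* Already privileged - call the implementation directly. */
        MPU_vTimerSetTimerIDImpl( xTimer, pvNewID );
    }
    else
    {
        /* Unprivileged - enter the system call, run the implementation with
         * privilege raised on the system call stack, then exit so that
         * privilege is dropped and the task stack is restored. */
        __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_ENTER ) : "memory" );
        MPU_vTimerSetTimerIDImpl( xTimer, pvNewID );
        __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
    }
}
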
+ svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b 
MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers 
because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl 
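
Each PUBWEAK stub in this block is a one-instruction infinite loop that branches to itself. The stubs exist so that the wrapper table above always links: when a particular MPU_*Impl routine is not built into mpu_wrappers because of configuration options, the weak default satisfies the reference, and if such a default is ever reached at run time the task simply spins in place, which is easy to spot under a debugger. A rough C analogue, assuming GCC/IAR-style weak symbols and a deliberately simplified prototype (illustration only, not code from this patch):

/* Weak default - replaced by the real implementation whenever it is built. */
__attribute__( ( weak ) ) void MPU_xQueueGenericSendImpl( void )
{
    for( ; ; )
    {
        /* Equivalent of the "b MPU_xQueueGenericSendImpl" self-branch used
         * by the assembly stubs. */
    }
}
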
+MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
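
For reference, both the new SVC_Handler dispatch earlier in this patch (the cmp r2, #4 / #5 / #6 sequence) and vPortSVCHandler_C in port.c recover the SVC number from the instruction stream: the stacked PC points just past the SVC instruction, and the immediate is the low byte of that 16-bit instruction, i.e. two bytes before the return address. A minimal editorial sketch of that lookup, reusing the portOFFSET_TO_PC value of 6 defined in port.c later in this patch:

#include <stdint.h>

#define portOFFSET_TO_PC    ( 6 )

/* pulCallerStackAddress points at the stacked exception frame:
 * r0-r3, r12, LR, PC, xPSR. */
static uint8_t prvGetSVCNumber( const uint32_t * pulCallerStackAddress )
{
    uint32_t ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];

    /* The SVC immediate sits two bytes before the stacked return address. */
    return ( ( const uint8_t * ) ulPC )[ -2 ];
}
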
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM85/non_secure/port.c b/portable/IAR/ARM_CM85/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM85/non_secure/port.c +++ b/portable/IAR/ARM_CM85/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
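The reason vSystemCallEnter_1 reserves two extra words and consults the padding bit is that the fifth argument of the system call lives on the caller's stack immediately after the hardware frame, one word further up when an alignment word was inserted. A small sketch of that index calculation (illustrative only, not code from the patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* Return the fifth argument of a system call from the caller's stack.
     * ulFrameWords is the size of the hardware-saved frame in words. */
    static uint32_t ulReadFifthArgument( const uint32_t * pulTaskStack,
                                         uint32_t ulFrameWords,
                                         bool bFrameWasPadded )
    {
        return bFrameWasPadded ? pulTaskStack[ ulFrameWords + 1UL ]
                               : pulTaskStack[ ulFrameWords ];
    }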
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
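Both entry paths end by clearing CONTROL.nPRIV, and vSystemCallExit sets it again, so a task is privileged only for the duration of the system call. The same pair of transitions written with CMSIS-style intrinsics instead of raw inline assembly, as a sketch that assumes __get_CONTROL()/__set_CONTROL() from a CMSIS device header are available:

    #define CONTROL_nPRIV_MASK    0x1UL   /* Bit 0 of CONTROL: 0 = privileged thread mode, 1 = unprivileged. */

    static void vRaisePrivilegeForSystemCall( void )
    {
        __set_CONTROL( __get_CONTROL() & ~CONTROL_nPRIV_MASK );   /* Clear nPRIV. */
    }

    static void vDropPrivilegeAfterSystemCall( void )
    {
        __set_CONTROL( __get_CONTROL() | CONTROL_nPRIV_MASK );    /* Set nPRIV. */
    }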
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
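xPortIsTaskPrivileged only inspects the calling task's ulTaskFlags, so it is cheap enough to use from application code; the port header later in this patch maps it to the portIS_TASK_PRIVILEGED() macro. A usage sketch (hypothetical application task, not from the patch):

    #include "FreeRTOS.h"
    #include "task.h"

    static void prvExampleTask( void * pvParameters )
    {
        ( void ) pvParameters;

        for( ;; )
        {
            if( xPortIsTaskPrivileged() == pdTRUE )
            {
                /* The task runs privileged (e.g. created with portPRIVILEGE_BIT)
                 * and may touch privileged-only resources directly. */
            }
            else
            {
                /* Unprivileged - kernel APIs go through the MPU_ wrappers and the
                 * system call mechanism added by this patch. */
            }
        }
    }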
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
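The two pointer adjustments above are the usual align-down/align-up idioms: the top of the system call stack is rounded down to an 8-byte boundary (the stack grows towards lower addresses) and the limit is rounded up. In plain arithmetic, assuming portBYTE_ALIGNMENT is 8 as on other Cortex-M ports:

    #include <stdint.h>

    #define BYTE_ALIGNMENT         8UL
    #define BYTE_ALIGNMENT_MASK    ( BYTE_ALIGNMENT - 1UL )   /* 0x7 */

    /* Round an address down to the previous 8-byte boundary. */
    static uint32_t ulAlignDown( uint32_t ulAddress )
    {
        return ulAddress & ~BYTE_ALIGNMENT_MASK;
    }

    /* Round an address up to the next 8-byte boundary. */
    static uint32_t ulAlignUp( uint32_t ulAddress )
    {
        return ( ulAddress + BYTE_ALIGNMENT_MASK ) & ~BYTE_ALIGNMENT_MASK;
    }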
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
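The buffer check here first guards the end-address computation against 32-bit wrap-around and then walks the task's regions, testing that both ends of the buffer fall inside an enabled region that grants the requested access. The helper macros are not expanded in this hunk; plausible C equivalents (an assumption, shown only to make the logic explicit) are:

    #include <stdbool.h>
    #include <stdint.h>

    /* ulStart + ulOffset overflows 32 bits exactly when ulStart > UINT32_MAX - ulOffset. */
    static bool bAdditionWouldOverflow( uint32_t ulStart, uint32_t ulOffset )
    {
        return ulStart > ( UINT32_MAX - ulOffset );
    }

    /* True when ulAddress lies within the inclusive range [ ulFirst, ulLast ]. */
    static bool bAddressWithinRange( uint32_t ulAddress, uint32_t ulFirst, uint32_t ulLast )
    {
        return ( ulAddress >= ulFirst ) && ( ulAddress <= ulLast );
    }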
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM85/non_secure/portasm.s b/portable/IAR/ARM_CM85/non_secure/portasm.s index a193cd7b80e..15e74ffc16b 100644 --- a/portable/IAR/ARM_CM85/non_secure/portasm.s +++ b/portable/IAR/ARM_CM85/non_secure/portasm.s @@ -32,12 +32,21 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN xSecureContext EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C EXTERN SecureContext_SaveContext EXTERN SecureContext_LoadContext +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -89,50 +98,81 @@ vPortAllocateSecureContext: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. 
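The RNR write followed by the ldmia/stmia sequence programs four regions at a time: once RNR is set, the eight words starting at 0xe000ed9c are RBAR/RLAR for that region followed by the RBAR_An/RLAR_An alias registers for the next three. A C sketch of the same idea (the memory barriers and the MPU disable/enable that bracket it in the assembly are omitted; the addresses are the ARMv8-M MPU registers already used as literals above):

    #include <stdint.h>

    #define MPU_RNR          ( *( volatile uint32_t * ) 0xe000ed98 )
    #define MPU_RBAR_BASE    ( ( volatile uint32_t * ) 0xe000ed9c )   /* RBAR, RLAR and their _A1.._A3 aliases. */

    typedef struct RegionSketch { uint32_t ulRBAR; uint32_t ulRLAR; } RegionSketch_t;

    /* Program four consecutive regions starting at ulFirstRegion, which must be
     * a multiple of four (4, 8 or 12 in the assembly above). */
    static void vProgramFourRegions( uint32_t ulFirstRegion, const RegionSketch_t * pxRegions )
    {
        uint32_t i;

        MPU_RNR = ulFirstRegion;

        for( i = 0UL; i < 4UL; i++ )
        {
            MPU_RBAR_BASE[ ( 2UL * i ) ]       = pxRegions[ i ].ulRBAR;
            MPU_RBAR_BASE[ ( 2UL * i ) + 1UL ] = pxRegions[ i ].ulRLAR;
        }
    }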
*/ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r1 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + + restore_general_regs_first_task: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r3, [r2] /* Read pxCurrentTCB. */ ldr r0, [r3] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r3, #4 /* r3 = r3 + 4. r3 now points to MAIR0 in TCB. */ - ldr r4, [r3] /* r4 = *r3 i.e. r4 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r2] /* Program RNR = 4. */ - adds r3, #4 /* r3 = r3 + 4. r3 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r3!, {r4-r11} /* Read 4 set of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r4} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM, r3 = CONTROL and r4 = EXC_RETURN. */ - ldr r5, =xSecureContext - str r1, [r5] /* Set xSecureContext to this task's value for the same. */ - msr psplim, r2 /* Set this task's PSPLIM value. */ - msr control, r3 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. 
*/ - bx r4 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r3} /* Read from stack - r1 = xSecureContext, r2 = PSPLIM and r3 = EXC_RETURN. */ ldr r4, =xSecureContext str r1, [r4] /* Set xSecureContext to this task's value for the same. */ @@ -145,6 +185,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ bx r3 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -183,6 +224,143 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. */ + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* Read pxCurrentTCB - Value of pxCurrentTCB must be in r1 as it is used as a parameter later. */ + ldr r2, [r1] /* r2 = Location in TCB where the context should be saved. */ + + cbz r0, save_ns_context /* No secure context to save. */ + save_s_context: + push {r0-r2, lr} + bl SecureContext_SaveContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r0-r2, lr} + + save_ns_context: + mov r3, lr /* r3 = LR (EXC_RETURN). */ + lsls r3, r3, #25 /* r3 = r3 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi save_special_regs /* r3 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + save_general_regs: + mrs r3, psp + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r3, r3, #0x20 /* Move r3 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r2!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r3, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r2!, {s0-s16} /* Store hardware saved FP context. */ + sub r3, r3, #0x20 /* Set r3 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r2!, {r4-r11} /* Store r4-r11. */ + ldmia r3, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r2!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psp /* r3 = PSP. */ + mrs r4, psplim /* r4 = PSPLIM. */ + mrs r5, control /* r5 = CONTROL. */ + stmia r2!, {r0, r3-r5, lr} /* Store xSecureContext, original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r2, [r1] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r3] /* r0 = pxCurrentTCB.*/ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. 
r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r1, [r3] /* r1 = pxCurrentTCB.*/ + ldr r2, [r1] /* r2 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r2!, {r0, r3-r5, lr} /* r0 = xSecureContext, r3 = original PSP, r4 = PSPLIM, r5 = CONTROL, LR restored. */ + msr psp, r3 + msr psplim, r4 + msr control, r5 + ldr r4, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ + str r0, [r4] /* Restore xSecureContext. */ + cbz r0, restore_ns_context /* No secure context to restore. */ + + restore_s_context: + push {r1-r3, lr} + bl SecureContext_LoadContext /* Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ + pop {r1-r3, lr} + + restore_ns_context: + mov r0, lr /* r0 = LR (EXC_RETURN). */ + lsls r0, r0, #25 /* r0 = r0 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ + bmi restore_context_done /* r0 < 0 ==> Bit[6] in EXC_RETURN is 1 ==> secure stack was used to store the stack frame. */ + + restore_general_regs: + ldmdb r2!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r3!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r2!, {r4-r11} /* r4-r11 restored. */ + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r2!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r3!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r2!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r2, [r1] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ ldr r0, [r3] /* Read xSecureContext - Value of xSecureContext must be in r0 as it is used as a parameter later. 
*/ @@ -200,20 +378,11 @@ PendSV_Handler: ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r3] /* Read pxCurrentTCB. */ -#if ( configENABLE_MPU == 1 ) - subs r2, r2, #16 /* Make space for xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ -#else /* configENABLE_MPU */ subs r2, r2, #12 /* Make space for xSecureContext, PSPLIM and LR on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ mrs r1, psplim /* r1 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ -#endif /* configENABLE_MPU */ b select_next_task save_ns_context: @@ -224,17 +393,6 @@ PendSV_Handler: it eq vstmdbeq r2!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - subs r2, r2, #48 /* Make space for xSecureContext, PSPLIM, CONTROL, LR and the remaining registers on the stack. */ - str r2, [r1] /* Save the new top of stack in TCB. */ - adds r2, r2, #16 /* r2 = r2 + 16. */ - stm r2, {r4-r11} /* Store the registers that are not saved automatically. */ - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r3, control /* r3 = CONTROL. */ - mov r4, lr /* r4 = LR/EXC_RETURN. */ - subs r2, r2, #16 /* r2 = r2 - 16. */ - stmia r2!, {r0, r1, r3, r4} /* Store xSecureContext, PSPLIM, CONTROL and LR on the stack. */ - #else /* configENABLE_MPU */ subs r2, r2, #44 /* Make space for xSecureContext, PSPLIM, LR and the remaining registers on the stack. */ str r2, [r1] /* Save the new top of stack in TCB. */ adds r2, r2, #12 /* r2 = r2 + 12. */ @@ -243,7 +401,6 @@ PendSV_Handler: mov r3, lr /* r3 = LR/EXC_RETURN. */ subs r2, r2, #12 /* r2 = r2 - 12. */ stmia r2!, {r0, r1, r3} /* Store xSecureContext, PSPLIM and LR on the stack. */ - #endif /* configENABLE_MPU */ select_next_task: mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY @@ -258,51 +415,6 @@ PendSV_Handler: ldr r1, [r3] /* Read pxCurrentTCB. */ ldr r2, [r1] /* The first item in pxCurrentTCB is the task top of stack. r2 now points to the top of stack. */ - #if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r3] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r4, [r1] /* r4 = *r1 i.e. r4 = MAIR0. */ - ldr r3, =0xe000edc0 /* r3 = 0xe000edc0 [Location of MAIR0]. */ - str r4, [r3] /* Program MAIR0. */ - ldr r3, =0xe000ed98 /* r3 = 0xe000ed98 [Location of RNR]. */ - movs r4, #4 /* r4 = 4. */ - str r4, [r3] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r3, =0xe000ed9c /* r3 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r3!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r3, =0xe000ed94 /* r3 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r3] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. 
*/ - str r4, [r3] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - #endif /* configENABLE_MPU */ - - #if ( configENABLE_MPU == 1 ) - ldmia r2!, {r0, r1, r3, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM, r3 = CONTROL and r4 = LR. */ - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r3 /* Restore the CONTROL register value for the task. */ - mov lr, r4 /* LR = r4. */ - ldr r3, =xSecureContext /* Read the location of xSecureContext i.e. &( xSecureContext ). */ - str r0, [r3] /* Restore the task's xSecureContext. */ - cbz r0, restore_ns_context /* If there is no secure context for the task, restore the non-secure context. */ - ldr r3, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ - ldr r1, [r3] /* Read pxCurrentTCB. */ - push {r2, r4} - bl SecureContext_LoadContext /* Restore the secure context. Params are in r0 and r1. r0 = xSecureContext and r1 = pxCurrentTCB. */ - pop {r2, r4} - mov lr, r4 /* LR = r4. */ - lsls r1, r4, #25 /* r1 = r4 << 25. Bit[6] of EXC_RETURN is 1 if secure stack was used, 0 if non-secure stack was used to store stack frame. */ - bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ - msr psp, r2 /* Remember the new top of stack for the task. */ - bx lr - #else /* configENABLE_MPU */ ldmia r2!, {r0, r1, r4} /* Read from stack - r0 = xSecureContext, r1 = PSPLIM and r4 = LR. */ msr psplim, r1 /* Restore the PSPLIM register value for the task. */ mov lr, r4 /* LR = r4. */ @@ -319,7 +431,6 @@ PendSV_Handler: bpl restore_ns_context /* bpl - branch if positive or zero. If r1 >= 0 ==> Bit[6] in EXC_RETURN is 0 i.e. non-secure stack was used. */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr - #endif /* configENABLE_MPU */ restore_ns_context: ldmia r2!, {r4-r11} /* Restore the registers that are not automatically restored. */ @@ -330,14 +441,50 @@ PendSV_Handler: #endif /* configENABLE_FPU || configENABLE_MVE */ msr psp, r2 /* Remember the new top of stack for the task. */ bx lr + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. 
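The two loads at the top of the new SVC_Handler implement the standard trick for recovering the SVC number: the stacked PC (word 6 of the exception frame, byte offset 24) points just past the 16-bit SVC instruction, whose low byte holds the immediate. The same logic as a C sketch:

    #include <stdint.h>

    /* pulExceptionFrame is the stack pointer selected by the tst lr, #4 test
     * (MSP or PSP); word 6 of the frame is the stacked return address. */
    static uint32_t ulExtractSvcNumber( const uint32_t * pulExceptionFrame )
    {
        const uint8_t * pucReturnAddress = ( const uint8_t * ) pulExceptionFrame[ 6 ];

        return ( uint32_t ) *( pucReturnAddress - 2 );   /* Low byte of the SVC opcode is the immediate. */
    }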
*/ + beq syscall_exit b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ vPortFreeSecureContext: diff --git a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM85/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. */ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. 
+ */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
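Each MAX_CONTEXT_SIZE value is simply the sum of the columns in the diagram above it. If a compile-time check is wanted (C11 _Static_assert assumed to be available; this is not part of the patch), the arithmetic can be pinned down like so:

    _Static_assert( ( 16 + 16 + 8 + 8 + 5 + 1 ) == 54, "FPU/MVE with TrustZone context size" );
    _Static_assert( ( 16 + 16 + 8 + 8 + 4 + 1 ) == 53, "FPU/MVE without TrustZone context size" );
    _Static_assert( ( 8 + 8 + 5 + 1 ) == 22, "TrustZone-only context size" );
    _Static_assert( ( 8 + 8 + 4 + 1 ) == 21, "Baseline context size" );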
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S b/portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S new file mode 100644 index 00000000000..f051a6073dd --- /dev/null +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/mpu_wrappers_v2_asm.S @@ -0,0 +1,1552 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + + SECTION freertos_system_calls:CODE:NOROOT(2) + THUMB +/*-----------------------------------------------------------*/ + +#include "FreeRTOSConfig.h" + +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + +/* These must be in sync with portmacro.h. */ +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. 
*/ +#define portSVC_SYSTEM_CALL_EXIT 6 +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + PUBLIC MPU_xTaskDelayUntil +MPU_xTaskDelayUntil: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv + MPU_xTaskDelayUntil_Priv: + pop {r0} + b MPU_xTaskDelayUntilImpl + MPU_xTaskDelayUntil_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskAbortDelay +MPU_xTaskAbortDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv + MPU_xTaskAbortDelay_Priv: + pop {r0} + b MPU_xTaskAbortDelayImpl + MPU_xTaskAbortDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskDelay +MPU_vTaskDelay: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv + MPU_vTaskDelay_Priv: + pop {r0} + b MPU_vTaskDelayImpl + MPU_vTaskDelay_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskPriorityGet +MPU_uxTaskPriorityGet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv + MPU_uxTaskPriorityGet_Priv: + pop {r0} + b MPU_uxTaskPriorityGetImpl + MPU_uxTaskPriorityGet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_eTaskGetState +MPU_eTaskGetState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv + MPU_eTaskGetState_Priv: + pop {r0} + b MPU_eTaskGetStateImpl + MPU_eTaskGetState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskGetInfo +MPU_vTaskGetInfo: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv + MPU_vTaskGetInfo_Priv: + pop {r0} + b MPU_vTaskGetInfoImpl + MPU_vTaskGetInfo_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetIdleTaskHandle +MPU_xTaskGetIdleTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv + MPU_xTaskGetIdleTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl + MPU_xTaskGetIdleTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSuspend +MPU_vTaskSuspend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv + MPU_vTaskSuspend_Priv: + pop {r0} + b MPU_vTaskSuspendImpl + MPU_vTaskSuspend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskResume +MPU_vTaskResume: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv + MPU_vTaskResume_Priv: + pop {r0} + b 
MPU_vTaskResumeImpl + MPU_vTaskResume_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetTickCount +MPU_xTaskGetTickCount: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv + MPU_xTaskGetTickCount_Priv: + pop {r0} + b MPU_xTaskGetTickCountImpl + MPU_xTaskGetTickCount_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetNumberOfTasks +MPU_uxTaskGetNumberOfTasks: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv + MPU_uxTaskGetNumberOfTasks_Priv: + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl + MPU_uxTaskGetNumberOfTasks_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTaskGetName +MPU_pcTaskGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv + MPU_pcTaskGetName_Priv: + pop {r0} + b MPU_pcTaskGetNameImpl + MPU_pcTaskGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimeCounter +MPU_ulTaskGetRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv + MPU_ulTaskGetRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl + MPU_ulTaskGetRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetRunTimePercent +MPU_ulTaskGetRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv + MPU_ulTaskGetRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl + MPU_ulTaskGetRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimePercent +MPU_ulTaskGetIdleRunTimePercent: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv + MPU_ulTaskGetIdleRunTimePercent_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl + MPU_ulTaskGetIdleRunTimePercent_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGetIdleRunTimeCounter +MPU_ulTaskGetIdleRunTimeCounter: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv + MPU_ulTaskGetIdleRunTimeCounter_Priv: + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl + MPU_ulTaskGetIdleRunTimeCounter_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetApplicationTaskTag +MPU_vTaskSetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv + 
MPU_vTaskSetApplicationTaskTag_Priv: + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl + MPU_vTaskSetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetApplicationTaskTag +MPU_xTaskGetApplicationTaskTag: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv + MPU_xTaskGetApplicationTaskTag_Priv: + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl + MPU_xTaskGetApplicationTaskTag_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetThreadLocalStoragePointer +MPU_vTaskSetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv + MPU_vTaskSetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl + MPU_vTaskSetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTaskGetThreadLocalStoragePointer +MPU_pvTaskGetThreadLocalStoragePointer: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + MPU_pvTaskGetThreadLocalStoragePointer_Priv: + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl + MPU_pvTaskGetThreadLocalStoragePointer_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetSystemState +MPU_uxTaskGetSystemState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv + MPU_uxTaskGetSystemState_Priv: + pop {r0} + b MPU_uxTaskGetSystemStateImpl + MPU_uxTaskGetSystemState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark +MPU_uxTaskGetStackHighWaterMark: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv + MPU_uxTaskGetStackHighWaterMark_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl + MPU_uxTaskGetStackHighWaterMark_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTaskGetStackHighWaterMark2 +MPU_uxTaskGetStackHighWaterMark2: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv + MPU_uxTaskGetStackHighWaterMark2_Priv: + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl + MPU_uxTaskGetStackHighWaterMark2_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetCurrentTaskHandle +MPU_xTaskGetCurrentTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv + MPU_xTaskGetCurrentTaskHandle_Priv: + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl + 
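Every wrapper in this file follows the same shape: read CONTROL, and if bit 0 (nPRIV) is clear, tail-branch straight to the privileged MPU_...Impl function; otherwise raise portSVC_SYSTEM_CALL_ENTER (or _ENTER_1), call the Impl function while running privileged on the system call stack, then raise portSVC_SYSTEM_CALL_EXIT to restore the task's state. In rough C form the pattern looks as below; this is illustrative only, because the real wrappers must stay in assembly so that r0-r3 still carry the caller's arguments when the Impl function is reached, and it assumes the CMSIS __get_CONTROL() intrinsic is available:

    #include "FreeRTOS.h"
    #include "task.h"

    extern BaseType_t MPU_xTaskAbortDelayImpl( TaskHandle_t xTask );   /* Privileged implementation. */

    BaseType_t MPU_xTaskAbortDelaySketch( TaskHandle_t xTask )
    {
        BaseType_t xReturn;

        if( ( __get_CONTROL() & 0x1UL ) == 0UL )   /* nPRIV clear - already privileged. */
        {
            xReturn = MPU_xTaskAbortDelayImpl( xTask );
        }
        else
        {
            __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_ENTER ) : "memory" );
            xReturn = MPU_xTaskAbortDelayImpl( xTask );
            __asm volatile ( "svc %0" : : "i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
        }

        return xReturn;
    }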
MPU_xTaskGetCurrentTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGetSchedulerState +MPU_xTaskGetSchedulerState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv + MPU_xTaskGetSchedulerState_Priv: + pop {r0} + b MPU_xTaskGetSchedulerStateImpl + MPU_xTaskGetSchedulerState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTaskSetTimeOutState +MPU_vTaskSetTimeOutState: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv + MPU_vTaskSetTimeOutState_Priv: + pop {r0} + b MPU_vTaskSetTimeOutStateImpl + MPU_vTaskSetTimeOutState_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskCheckForTimeOut +MPU_xTaskCheckForTimeOut: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv + MPU_xTaskCheckForTimeOut_Priv: + pop {r0} + b MPU_xTaskCheckForTimeOutImpl + MPU_xTaskCheckForTimeOut_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotify +MPU_xTaskGenericNotify: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv + MPU_xTaskGenericNotify_Priv: + pop {r0} + b MPU_xTaskGenericNotifyImpl + MPU_xTaskGenericNotify_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyWait +MPU_xTaskGenericNotifyWait: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv + MPU_xTaskGenericNotifyWait_Priv: + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl + MPU_xTaskGenericNotifyWait_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyTake +MPU_ulTaskGenericNotifyTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv + MPU_ulTaskGenericNotifyTake_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl + MPU_ulTaskGenericNotifyTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTaskGenericNotifyStateClear +MPU_xTaskGenericNotifyStateClear: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv + MPU_xTaskGenericNotifyStateClear_Priv: + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl + MPU_xTaskGenericNotifyStateClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_ulTaskGenericNotifyValueClear +MPU_ulTaskGenericNotifyValueClear: + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_ulTaskGenericNotifyValueClear_Unpriv + MPU_ulTaskGenericNotifyValueClear_Priv: + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl + MPU_ulTaskGenericNotifyValueClear_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGenericSend +MPU_xQueueGenericSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv + MPU_xQueueGenericSend_Priv: + pop {r0} + b MPU_xQueueGenericSendImpl + MPU_xQueueGenericSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueMessagesWaiting +MPU_uxQueueMessagesWaiting: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv + MPU_uxQueueMessagesWaiting_Priv: + pop {r0} + b MPU_uxQueueMessagesWaitingImpl + MPU_uxQueueMessagesWaiting_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxQueueSpacesAvailable +MPU_uxQueueSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv + MPU_uxQueueSpacesAvailable_Priv: + pop {r0} + b MPU_uxQueueSpacesAvailableImpl + MPU_uxQueueSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueReceive +MPU_xQueueReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv + MPU_xQueueReceive_Priv: + pop {r0} + b MPU_xQueueReceiveImpl + MPU_xQueueReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueuePeek +MPU_xQueuePeek: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv + MPU_xQueuePeek_Priv: + pop {r0} + b MPU_xQueuePeekImpl + MPU_xQueuePeek_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSemaphoreTake +MPU_xQueueSemaphoreTake: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSemaphoreTake_Unpriv + MPU_xQueueSemaphoreTake_Priv: + pop {r0} + b MPU_xQueueSemaphoreTakeImpl + MPU_xQueueSemaphoreTake_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGetMutexHolder +MPU_xQueueGetMutexHolder: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv + MPU_xQueueGetMutexHolder_Priv: + pop {r0} + b MPU_xQueueGetMutexHolderImpl + MPU_xQueueGetMutexHolder_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueTakeMutexRecursive +MPU_xQueueTakeMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv + MPU_xQueueTakeMutexRecursive_Priv: + pop {r0} + b 
MPU_xQueueTakeMutexRecursiveImpl + MPU_xQueueTakeMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueGiveMutexRecursive +MPU_xQueueGiveMutexRecursive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv + MPU_xQueueGiveMutexRecursive_Priv: + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl + MPU_xQueueGiveMutexRecursive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueSelectFromSet +MPU_xQueueSelectFromSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv + MPU_xQueueSelectFromSet_Priv: + pop {r0} + b MPU_xQueueSelectFromSetImpl + MPU_xQueueSelectFromSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xQueueAddToSet +MPU_xQueueAddToSet: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv + MPU_xQueueAddToSet_Priv: + pop {r0} + b MPU_xQueueAddToSetImpl + MPU_xQueueAddToSet_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueAddToRegistry +MPU_vQueueAddToRegistry: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv + MPU_vQueueAddToRegistry_Priv: + pop {r0} + b MPU_vQueueAddToRegistryImpl + MPU_vQueueAddToRegistry_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vQueueUnregisterQueue +MPU_vQueueUnregisterQueue: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv + MPU_vQueueUnregisterQueue_Priv: + pop {r0} + b MPU_vQueueUnregisterQueueImpl + MPU_vQueueUnregisterQueue_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcQueueGetName +MPU_pcQueueGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv + MPU_pcQueueGetName_Priv: + pop {r0} + b MPU_pcQueueGetNameImpl + MPU_pcQueueGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pvTimerGetTimerID +MPU_pvTimerGetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv + MPU_pvTimerGetTimerID_Priv: + pop {r0} + b MPU_pvTimerGetTimerIDImpl + MPU_pvTimerGetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetTimerID +MPU_vTimerSetTimerID: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv + MPU_vTimerSetTimerID_Priv: + pop {r0} + b MPU_vTimerSetTimerIDImpl + MPU_vTimerSetTimerID_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl 
+ svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerIsTimerActive +MPU_xTimerIsTimerActive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv + MPU_xTimerIsTimerActive_Priv: + pop {r0} + b MPU_xTimerIsTimerActiveImpl + MPU_xTimerIsTimerActive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetTimerDaemonTaskHandle +MPU_xTimerGetTimerDaemonTaskHandle: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + MPU_xTimerGetTimerDaemonTaskHandle_Priv: + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl + MPU_xTimerGetTimerDaemonTaskHandle_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGenericCommand +MPU_xTimerGenericCommand: + push {r0} + /* This function can be called from ISR also and therefore, we need a check + * to take privileged path, if called from ISR. */ + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv + MPU_xTimerGenericCommand_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr + MPU_xTimerGenericCommand_Priv: + pop {r0} + b MPU_xTimerGenericCommandImpl + +/*-----------------------------------------------------------*/ + + PUBLIC MPU_pcTimerGetName +MPU_pcTimerGetName: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv + MPU_pcTimerGetName_Priv: + pop {r0} + b MPU_pcTimerGetNameImpl + MPU_pcTimerGetName_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vTimerSetReloadMode +MPU_vTimerSetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv + MPU_vTimerSetReloadMode_Priv: + pop {r0} + b MPU_vTimerSetReloadModeImpl + MPU_vTimerSetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetReloadMode +MPU_xTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv + MPU_xTimerGetReloadMode_Priv: + pop {r0} + b MPU_xTimerGetReloadModeImpl + MPU_xTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxTimerGetReloadMode +MPU_uxTimerGetReloadMode: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv + MPU_uxTimerGetReloadMode_Priv: + pop {r0} + b MPU_uxTimerGetReloadModeImpl + MPU_uxTimerGetReloadMode_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetPeriod +MPU_xTimerGetPeriod: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv + MPU_xTimerGetPeriod_Priv: + pop {r0} + b 
MPU_xTimerGetPeriodImpl + MPU_xTimerGetPeriod_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xTimerGetExpiryTime +MPU_xTimerGetExpiryTime: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv + MPU_xTimerGetExpiryTime_Priv: + pop {r0} + b MPU_xTimerGetExpiryTimeImpl + MPU_xTimerGetExpiryTime_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupWaitBits +MPU_xEventGroupWaitBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv + MPU_xEventGroupWaitBits_Priv: + pop {r0} + b MPU_xEventGroupWaitBitsImpl + MPU_xEventGroupWaitBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupClearBits +MPU_xEventGroupClearBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv + MPU_xEventGroupClearBits_Priv: + pop {r0} + b MPU_xEventGroupClearBitsImpl + MPU_xEventGroupClearBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSetBits +MPU_xEventGroupSetBits: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv + MPU_xEventGroupSetBits_Priv: + pop {r0} + b MPU_xEventGroupSetBitsImpl + MPU_xEventGroupSetBits_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xEventGroupSync +MPU_xEventGroupSync: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv + MPU_xEventGroupSync_Priv: + pop {r0} + b MPU_xEventGroupSyncImpl + MPU_xEventGroupSync_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_uxEventGroupGetNumber +MPU_uxEventGroupGetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv + MPU_uxEventGroupGetNumber_Priv: + pop {r0} + b MPU_uxEventGroupGetNumberImpl + MPU_uxEventGroupGetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_vEventGroupSetNumber +MPU_vEventGroupSetNumber: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv + MPU_vEventGroupSetNumber_Priv: + pop {r0} + b MPU_vEventGroupSetNumberImpl + MPU_vEventGroupSetNumber_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSend +MPU_xStreamBufferSend: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv + MPU_xStreamBufferSend_Priv: + pop {r0} + b MPU_xStreamBufferSendImpl + MPU_xStreamBufferSend_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl 
MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferReceive +MPU_xStreamBufferReceive: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv + MPU_xStreamBufferReceive_Priv: + pop {r0} + b MPU_xStreamBufferReceiveImpl + MPU_xStreamBufferReceive_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsFull +MPU_xStreamBufferIsFull: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv + MPU_xStreamBufferIsFull_Priv: + pop {r0} + b MPU_xStreamBufferIsFullImpl + MPU_xStreamBufferIsFull_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferIsEmpty +MPU_xStreamBufferIsEmpty: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv + MPU_xStreamBufferIsEmpty_Priv: + pop {r0} + b MPU_xStreamBufferIsEmptyImpl + MPU_xStreamBufferIsEmpty_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSpacesAvailable +MPU_xStreamBufferSpacesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv + MPU_xStreamBufferSpacesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl + MPU_xStreamBufferSpacesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferBytesAvailable +MPU_xStreamBufferBytesAvailable: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv + MPU_xStreamBufferBytesAvailable_Priv: + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl + MPU_xStreamBufferBytesAvailable_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferSetTriggerLevel +MPU_xStreamBufferSetTriggerLevel: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv + MPU_xStreamBufferSetTriggerLevel_Priv: + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl + MPU_xStreamBufferSetTriggerLevel_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + + PUBLIC MPU_xStreamBufferNextMessageLengthBytes +MPU_xStreamBufferNextMessageLengthBytes: + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv + MPU_xStreamBufferNextMessageLengthBytes_Priv: + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl + MPU_xStreamBufferNextMessageLengthBytes_Unpriv: + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +/*-----------------------------------------------------------*/ + +/* Default weak implementations in case one is not available from + * mpu_wrappers 
because of config options. */ + + PUBWEAK MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntilImpl: + b MPU_xTaskDelayUntilImpl + + PUBWEAK MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelayImpl: + b MPU_xTaskAbortDelayImpl + + PUBWEAK MPU_vTaskDelayImpl +MPU_vTaskDelayImpl: + b MPU_vTaskDelayImpl + + PUBWEAK MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGetImpl: + b MPU_uxTaskPriorityGetImpl + + PUBWEAK MPU_eTaskGetStateImpl +MPU_eTaskGetStateImpl: + b MPU_eTaskGetStateImpl + + PUBWEAK MPU_vTaskGetInfoImpl +MPU_vTaskGetInfoImpl: + b MPU_vTaskGetInfoImpl + + PUBWEAK MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandleImpl: + b MPU_xTaskGetIdleTaskHandleImpl + + PUBWEAK MPU_vTaskSuspendImpl +MPU_vTaskSuspendImpl: + b MPU_vTaskSuspendImpl + + PUBWEAK MPU_vTaskResumeImpl +MPU_vTaskResumeImpl: + b MPU_vTaskResumeImpl + + PUBWEAK MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCountImpl: + b MPU_xTaskGetTickCountImpl + + PUBWEAK MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasksImpl: + b MPU_uxTaskGetNumberOfTasksImpl + + PUBWEAK MPU_pcTaskGetNameImpl +MPU_pcTaskGetNameImpl: + b MPU_pcTaskGetNameImpl + + PUBWEAK MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounterImpl: + b MPU_ulTaskGetRunTimeCounterImpl + + PUBWEAK MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercentImpl: + b MPU_ulTaskGetRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercentImpl: + b MPU_ulTaskGetIdleRunTimePercentImpl + + PUBWEAK MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounterImpl: + b MPU_ulTaskGetIdleRunTimeCounterImpl + + PUBWEAK MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTagImpl: + b MPU_vTaskSetApplicationTaskTagImpl + + PUBWEAK MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTagImpl: + b MPU_xTaskGetApplicationTaskTagImpl + + PUBWEAK MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointerImpl: + b MPU_vTaskSetThreadLocalStoragePointerImpl + + PUBWEAK MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointerImpl: + b MPU_pvTaskGetThreadLocalStoragePointerImpl + + PUBWEAK MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemStateImpl: + b MPU_uxTaskGetSystemStateImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMarkImpl: + b MPU_uxTaskGetStackHighWaterMarkImpl + + PUBWEAK MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2Impl: + b MPU_uxTaskGetStackHighWaterMark2Impl + + PUBWEAK MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandleImpl: + b MPU_xTaskGetCurrentTaskHandleImpl + + PUBWEAK MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerStateImpl: + b MPU_xTaskGetSchedulerStateImpl + + PUBWEAK MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutStateImpl: + b MPU_vTaskSetTimeOutStateImpl + + PUBWEAK MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOutImpl: + b MPU_xTaskCheckForTimeOutImpl + + PUBWEAK MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotifyImpl: + b MPU_xTaskGenericNotifyImpl + + PUBWEAK MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWaitImpl: + b MPU_xTaskGenericNotifyWaitImpl + + PUBWEAK MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTakeImpl: + b MPU_ulTaskGenericNotifyTakeImpl + + PUBWEAK MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClearImpl: + b MPU_xTaskGenericNotifyStateClearImpl + + PUBWEAK MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClearImpl: + b MPU_ulTaskGenericNotifyValueClearImpl + + PUBWEAK MPU_xQueueGenericSendImpl 
+MPU_xQueueGenericSendImpl: + b MPU_xQueueGenericSendImpl + + PUBWEAK MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaitingImpl: + b MPU_uxQueueMessagesWaitingImpl + + PUBWEAK MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailableImpl: + b MPU_uxQueueSpacesAvailableImpl + + PUBWEAK MPU_xQueueReceiveImpl +MPU_xQueueReceiveImpl: + b MPU_xQueueReceiveImpl + + PUBWEAK MPU_xQueuePeekImpl +MPU_xQueuePeekImpl: + b MPU_xQueuePeekImpl + + PUBWEAK MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTakeImpl: + b MPU_xQueueSemaphoreTakeImpl + + PUBWEAK MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolderImpl: + b MPU_xQueueGetMutexHolderImpl + + PUBWEAK MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursiveImpl: + b MPU_xQueueTakeMutexRecursiveImpl + + PUBWEAK MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursiveImpl: + b MPU_xQueueGiveMutexRecursiveImpl + + PUBWEAK MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSetImpl: + b MPU_xQueueSelectFromSetImpl + + PUBWEAK MPU_xQueueAddToSetImpl +MPU_xQueueAddToSetImpl: + b MPU_xQueueAddToSetImpl + + PUBWEAK MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistryImpl: + b MPU_vQueueAddToRegistryImpl + + PUBWEAK MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueueImpl: + b MPU_vQueueUnregisterQueueImpl + + PUBWEAK MPU_pcQueueGetNameImpl +MPU_pcQueueGetNameImpl: + b MPU_pcQueueGetNameImpl + + PUBWEAK MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerIDImpl: + b MPU_pvTimerGetTimerIDImpl + + PUBWEAK MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerIDImpl: + b MPU_vTimerSetTimerIDImpl + + PUBWEAK MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActiveImpl: + b MPU_xTimerIsTimerActiveImpl + + PUBWEAK MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandleImpl: + b MPU_xTimerGetTimerDaemonTaskHandleImpl + + PUBWEAK MPU_xTimerGenericCommandImpl +MPU_xTimerGenericCommandImpl: + b MPU_xTimerGenericCommandImpl + + PUBWEAK MPU_pcTimerGetNameImpl +MPU_pcTimerGetNameImpl: + b MPU_pcTimerGetNameImpl + + PUBWEAK MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadModeImpl: + b MPU_vTimerSetReloadModeImpl + + PUBWEAK MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadModeImpl: + b MPU_xTimerGetReloadModeImpl + + PUBWEAK MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadModeImpl: + b MPU_uxTimerGetReloadModeImpl + + PUBWEAK MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriodImpl: + b MPU_xTimerGetPeriodImpl + + PUBWEAK MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTimeImpl: + b MPU_xTimerGetExpiryTimeImpl + + PUBWEAK MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBitsImpl: + b MPU_xEventGroupWaitBitsImpl + + PUBWEAK MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBitsImpl: + b MPU_xEventGroupClearBitsImpl + + PUBWEAK MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBitsImpl: + b MPU_xEventGroupSetBitsImpl + + PUBWEAK MPU_xEventGroupSyncImpl +MPU_xEventGroupSyncImpl: + b MPU_xEventGroupSyncImpl + + PUBWEAK MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumberImpl: + b MPU_uxEventGroupGetNumberImpl + + PUBWEAK MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumberImpl: + b MPU_vEventGroupSetNumberImpl + + PUBWEAK MPU_xStreamBufferSendImpl +MPU_xStreamBufferSendImpl: + b MPU_xStreamBufferSendImpl + + PUBWEAK MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceiveImpl: + b MPU_xStreamBufferReceiveImpl + + PUBWEAK MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFullImpl: + b MPU_xStreamBufferIsFullImpl + + PUBWEAK MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmptyImpl: + b MPU_xStreamBufferIsEmptyImpl + + PUBWEAK MPU_xStreamBufferSpacesAvailableImpl 
+MPU_xStreamBufferSpacesAvailableImpl: + b MPU_xStreamBufferSpacesAvailableImpl + + PUBWEAK MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailableImpl: + b MPU_xStreamBufferBytesAvailableImpl + + PUBWEAK MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevelImpl: + b MPU_xStreamBufferSetTriggerLevelImpl + + PUBWEAK MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytesImpl: + b MPU_xStreamBufferNextMessageLengthBytesImpl + +/*-----------------------------------------------------------*/ + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + + END diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c index 88c45048fa1..cab1b3668bf 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/port.c +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/port.c @@ -107,6 +107,13 @@ #define portPRIGROUP_SHIFT ( 8UL ) /*-----------------------------------------------------------*/ +/** + * @brief Constants used during system call enter and exit. + */ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the FPU. */ @@ -123,6 +130,14 @@ #define portFPCCR_LSPEN_MASK ( 1UL << portFPCCR_LSPEN_POS ) /*-----------------------------------------------------------*/ +/** + * @brief Offsets in the stack to the parameters when inside the SVC handler. + */ +#define portOFFSET_TO_LR ( 5 ) +#define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) +/*-----------------------------------------------------------*/ + /** * @brief Constants required to manipulate the MPU. */ @@ -148,6 +163,8 @@ #define portMPU_RBAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ #define portMPU_RLAR_ADDRESS_MASK ( 0xffffffe0 ) /* Must be 32-byte aligned. */ +#define portMPU_RBAR_ACCESS_PERMISSIONS_MASK ( 3UL << 1UL ) + #define portMPU_MAIR_ATTR0_POS ( 0UL ) #define portMPU_MAIR_ATTR0_MASK ( 0x000000ff ) @@ -191,6 +208,30 @@ /* Expected value of the portMPU_TYPE register. */ #define portEXPECTED_MPU_TYPE_VALUE ( configTOTAL_MPU_REGIONS << 8UL ) + +/* Extract first address of the MPU region as encoded in the + * RBAR (Region Base Address Register) value. */ +#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \ + ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK ) + +/* Extract last address of the MPU region as encoded in the + * RLAR (Region Limit Address Register) value. */ +#define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \ + ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK ) + +/* Does addr lies within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. 
*/ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) /*-----------------------------------------------------------*/ /** @@ -311,6 +352,19 @@ static void prvTaskExitError( void ); #if ( configENABLE_MPU == 1 ) +/** + * @brief Extract MPU region's access permissions from the Region Base Address + * Register (RBAR) value. + * + * @param ulRBARValue RBAR value for the MPU region. + * + * @return uint32_t Access permissions. + */ + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION; +#endif /* configENABLE_MPU */ + +#if ( configENABLE_MPU == 1 ) + /** * @brief Setup the Memory Protection Unit (MPU). */ @@ -365,6 +419,60 @@ void SysTick_Handler( void ) PRIVILEGED_FUNCTION; * @brief C part of SVC handler. */ portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION; + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configENABLE_MPU == 1 ) + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. 
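The portOFFSET_TO_LR/PC/PSR constants introduced above are indices into the frame that the Cortex-M hardware pushes on exception entry, which is also the frame the system call helpers below copy between stacks. For orientation, a sketch of that basic frame (the struct is illustrative; the port itself only uses the offsets):

    /* Basic hardware-stacked exception frame on ARMv8-M, lowest address first. */
    typedef struct
    {
        uint32_t ulR0;   /* [ 0 ]                     */
        uint32_t ulR1;   /* [ 1 ]                     */
        uint32_t ulR2;   /* [ 2 ]                     */
        uint32_t ulR3;   /* [ 3 ]                     */
        uint32_t ulR12;  /* [ 4 ]                     */
        uint32_t ulLR;   /* [ 5 ] - portOFFSET_TO_LR  */
        uint32_t ulPC;   /* [ 6 ] - portOFFSET_TO_PC  */
        uint32_t ulXPSR; /* [ 7 ] - portOFFSET_TO_PSR */
    } HardwareSavedFrame_t;  /* Illustrative name only. */

An extended (FPU) frame appends s0-s15, FPSCR and a reserved word to this layout, which is why the helpers below work with a frame size of either 8 or 26 words.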
+ */ + BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -681,6 +789,26 @@ static void prvTaskExitError( void ) } /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */ + { + uint32_t ulAccessPermissions = 0; + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY ) + { + ulAccessPermissions = tskMPU_READ_PERMISSION; + } + + if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE ) + { + ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } + + return ulAccessPermissions; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + #if ( configENABLE_MPU == 1 ) static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */ { @@ -853,7 +981,7 @@ void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */ { - #if ( configENABLE_MPU == 1 ) + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) #if defined( __ARMCC_VERSION ) /* Declaration when these variable are defined in code instead of being @@ -865,7 +993,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO extern uint32_t __syscalls_flash_start__[]; extern uint32_t __syscalls_flash_end__[]; #endif /* defined( __ARMCC_VERSION ) */ - #endif /* configENABLE_MPU */ + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ uint32_t ulPC; @@ -880,7 +1008,7 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO /* Register are stored on the stack in the following order - R0, R1, R2, R3, * R12, LR, PC, xPSR. */ - ulPC = pulCallerStackAddress[ 6 ]; + ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ]; switch( ucSVCNumber ) @@ -951,18 +1079,18 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO vRestoreContextOfFirstTask(); break; - #if ( configENABLE_MPU == 1 ) - case portSVC_RAISE_PRIVILEGE: + #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) ) + case portSVC_RAISE_PRIVILEGE: - /* Only raise the privilege, if the svc was raised from any of - * the system calls. */ - if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && - ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) - { - vRaisePrivilege(); - } - break; - #endif /* configENABLE_MPU */ + /* Only raise the privilege, if the svc was raised from any of + * the system calls. */ + if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + vRaisePrivilege(); + } + break; + #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */ default: /* Incorrect SVC call. 
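The SVC number that the handler dispatches on is recovered from the SVC instruction itself: the Thumb encoding is 0xDF followed by an 8-bit immediate and is stored little-endian, so the immediate byte sits two addresses below the stacked return address. A minimal sketch, mirroring the existing ucSVCNumber line:

    uint32_t ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ]; /* Stacked return address.      */
    uint8_t ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];        /* imm8 byte of the SVC opcode. */

    /* The port routes portSVC_SYSTEM_CALL_ENTER, portSVC_SYSTEM_CALL_ENTER_1 and
     * portSVC_SYSTEM_CALL_EXIT to vSystemCallEnter(), vSystemCallEnter_1() and
     * vSystemCallExit() respectively; the _ENTER_1 variant is raised by wrappers
     * whose fifth argument arrives on the stack under the AAPCS. */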
*/ @@ -971,51 +1099,455 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO } /*-----------------------------------------------------------*/ -/* *INDENT-OFF* */ -#if ( configENABLE_MPU == 1 ) - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters, - BaseType_t xRunPrivileged ) /* PRIVILEGED_FUNCTION */ -#else - StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, - StackType_t * pxEndOfStack, - TaskFunction_t pxCode, - void * pvParameters ) /* PRIVILEGED_FUNCTION */ -#endif /* configENABLE_MPU */ -/* *INDENT-ON* */ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - #if ( portPRELOAD_REGISTERS == 0 ) + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) { - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ - pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ - *pxTopOfStack = portINITIAL_EXC_RETURN; + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; - #if ( configENABLE_MPU == 1 ) + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) { - pxTopOfStack--; + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. 
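The frame-size selection in vSystemCallEnter() above keys off bit 4 of EXC_RETURN (portEXC_RETURN_STACK_FRAME_TYPE_MASK): a cleared bit means the hardware stacked an extended FPU frame, and the vpush {s0}/vpop {s0} pair simply forces any pending lazy FP stacking to complete before the frame is copied. The two word counts, spelled out with illustrative names:

    #define BASIC_FRAME_WORDS       8UL   /* r0-r3, r12, lr, pc, xpsr                     */
    #define EXTENDED_FRAME_WORDS    26UL  /* basic frame + s0-s15 + FPSCR + reserved word */

    ulStackFrameSize = ( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
                           ? EXTENDED_FRAME_WORDS
                           : BASIC_FRAME_WORDS;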
*/ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); - if( xRunPrivileged == pdTRUE ) + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. */ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. 
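Taken together, the entry path is a controlled stack switch. A condensed sketch of vSystemCallEnter(), with the inline assembly reduced to comments (simplified; the FPU frame handling and the _ENTER_1 fifth-parameter copy are omitted):

    /* Simplified shape of vSystemCallEnter(); illustrative only. */
    static void prvSystemCallEnterSketch( uint32_t * pulTaskStack,
                                          uint32_t ulStackFrameSize,
                                          xMPU_SETTINGS * pxMpuSettings )
    {
        uint32_t * pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
        uint32_t i;

        pulSystemCallStack -= ulStackFrameSize;            /* Room for the hardware frame.     */

        for( i = 0; i < ulStackFrameSize; i++ )            /* Replay the frame so that the SVC */
        {                                                  /* return executes on the system    */
            pulSystemCallStack[ i ] = pulTaskStack[ i ];   /* call stack.                      */
        }

        /* Remember what vSystemCallExit() must restore. */
        pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
        pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;

        /* msr psp/psplim      - run on the system call stack from here on.    */
        /* clear CONTROL.nPRIV - the Impl function executes privileged until   */
        /*                       vSystemCallExit() undoes both of these steps. */
    }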
*/ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); } else { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; } } - #endif /* configENABLE_MPU */ + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Store the value of the LR and PSPLIM registers before the SVC was raised. + * We need to restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* Use the pulSystemCallStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) ); + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) ); + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + + /* Raise the privilege for the duration of the system call. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " bics r0, r1 \n" /* Clear nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
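vSystemCallEnter_1() differs from vSystemCallEnter() only in its handling of the fifth argument, which the AAPCS passes on the stack: two words rather than one are reserved so the system call stack stays double-word aligned, and bit 9 of the stacked xPSR (portPSR_STACK_PADDING_MASK) says whether the hardware inserted an aligner word in front of that argument. The lookup, restated:

    /* Locating the caller's stacked fifth argument (mirrors the copy above). */
    if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) != 0UL )
    {
        /* Aligner present: the argument is one word further up the task stack. */
        pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1UL ];
    }
    else
    {
        pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ];
    }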
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i; + #if defined( __ARMCC_VERSION ) + /* Declaration when these variable are defined in code instead of being + * exported from linker scripts. */ + extern uint32_t * __syscalls_flash_start__; + extern uint32_t * __syscalls_flash_end__; + #else + /* Declaration when these variable are exported from linker scripts. */ + extern uint32_t __syscalls_flash_start__[]; + extern uint32_t __syscalls_flash_end__[]; + #endif /* #if defined( __ARMCC_VERSION ) */ + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + { + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + __asm volatile ( + " vpush {s0} \n" /* Trigger lazy stacking. */ + " vpop {s0} \n" /* Nullify the affect of the above instruction. */ + ::: "memory" + ); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + } + #else + { + ulStackFrameSize = 8; + } + #endif /* configENABLE_FPU || configENABLE_MVE */ + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) ); + + /* Restore the LR and PSPLIM to what they were at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) ); + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + + /* Drop the privilege before returning to the thread mode. */ + __asm volatile ( + " mrs r0, control \n" /* Obtain current control value. */ + " movs r1, #1 \n" /* r1 = 1. */ + " orrs r0, r1 \n" /* Set nPRIV bit. */ + " msr control, r0 \n" /* Write back new control value. 
*/ + ::: "r0", "r1", "memory" + ); + } +} + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configENABLE_MPU == 1 ) + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + +#if( configENABLE_MPU == 1 ) + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */ +{ + uint32_t ulIndex = 0; + + xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */ + ulIndex++; + + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */ + ulIndex++; + + #if ( configENABLE_TRUSTZONE == 1 ) + { + xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */ + ulIndex++; + } + #endif /* configENABLE_TRUSTZONE */ + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + ulIndex++; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */ + ulIndex++; + if( xRunPrivileged == pdTRUE ) + { + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */ + ulIndex++; + } + else + { + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */ + ulIndex++; + } + xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */ + ulIndex++; + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. 
*/ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) + + ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. */ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + return &( xMPUSettings->ulContext[ ulIndex ] ); +} + +#else /* configENABLE_MPU */ + +StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) /* PRIVILEGED_FUNCTION */ +{ + /* Simulate the stack frame as it would be created by a context switch + * interrupt. */ + #if ( portPRELOAD_REGISTERS == 0 ) + { + pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ + pxTopOfStack--; + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ + pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ + pxTopOfStack -= 9; /* R11..R4, EXC_RETURN. */ + *pxTopOfStack = portINITIAL_EXC_RETURN; pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1029,55 +1561,39 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO #else /* portPRELOAD_REGISTERS */ { pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ + *pxTopOfStack = portINITIAL_XPSR; /* xPSR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pxCode; /* PC */ + *pxTopOfStack = ( StackType_t ) pxCode; /* PC. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */ + *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12 */ + *pxTopOfStack = ( StackType_t ) 0x12121212UL; /* R12. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3 */ + *pxTopOfStack = ( StackType_t ) 0x03030303UL; /* R3. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2 */ + *pxTopOfStack = ( StackType_t ) 0x02020202UL; /* R2. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1 */ + *pxTopOfStack = ( StackType_t ) 0x01010101UL; /* R1. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ + *pxTopOfStack = ( StackType_t ) pvParameters; /* R0. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11 */ + *pxTopOfStack = ( StackType_t ) 0x11111111UL; /* R11. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10 */ + *pxTopOfStack = ( StackType_t ) 0x10101010UL; /* R10. 
*/ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09 */ + *pxTopOfStack = ( StackType_t ) 0x09090909UL; /* R09. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08 */ + *pxTopOfStack = ( StackType_t ) 0x08080808UL; /* R08. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07 */ + *pxTopOfStack = ( StackType_t ) 0x07070707UL; /* R07. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06 */ + *pxTopOfStack = ( StackType_t ) 0x06060606UL; /* R06. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05 */ + *pxTopOfStack = ( StackType_t ) 0x05050505UL; /* R05. */ pxTopOfStack--; - *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04 */ + *pxTopOfStack = ( StackType_t ) 0x04040404UL; /* R04. */ pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN */ - - #if ( configENABLE_MPU == 1 ) - { - pxTopOfStack--; - - if( xRunPrivileged == pdTRUE ) - { - *pxTopOfStack = portINITIAL_CONTROL_PRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - else - { - *pxTopOfStack = portINITIAL_CONTROL_UNPRIVILEGED; /* Slot used to hold this task's CONTROL value. */ - } - } - #endif /* configENABLE_MPU */ - + *pxTopOfStack = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ pxTopOfStack--; *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */ @@ -1092,6 +1608,8 @@ void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTIO return pxTopOfStack; } + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */ @@ -1347,6 +1865,54 @@ void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */ #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + + { + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS; i++ ) + { + /* Is the MPU region enabled? 
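xPortIsAuthorizedToAccessBuffer() grants access outright to privileged tasks; for unprivileged tasks it first rejects buffers whose end address would wrap past the top of memory and then walks the task's MPU regions with the helper macros defined earlier, requiring the whole buffer to fall inside one enabled region whose permissions cover the request. The intended use, shown as a hedged sketch with placeholder names, is for kernel-side wrappers to validate task-supplied pointers before touching them:

    /* Hedged usage sketch; the buffer variables are placeholders, not code from the patch. */
    if( xPortIsAuthorizedToAccessBuffer( pvTaskSuppliedBuffer,        /* Start of the buffer. */
                                         ulTaskSuppliedBufferLength,  /* Length in bytes.     */
                                         tskMPU_WRITE_PERMISSION ) == pdTRUE )
    {
        /* Safe for the kernel to write into the buffer on the task's behalf. */
    }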
*/ + if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ), + portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) && + portIS_AUTHORIZED( ulAccessRequested, + prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + } + + return xAccessGranted; + } +#endif /* configENABLE_MPU */ +/*-----------------------------------------------------------*/ + BaseType_t xPortIsInsideInterrupt( void ) { uint32_t ulCurrentInterrupt; diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s b/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s index 581b84d4951..ec52025270b 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portasm.s @@ -32,9 +32,18 @@ the code is included in C files but excluded by the preprocessor in assembly files (__ICCARM__ is defined by the IAR C compiler but not by the IAR assembler. */ #include "FreeRTOSConfig.h" +#ifndef configUSE_MPU_WRAPPERS_V1 + #define configUSE_MPU_WRAPPERS_V1 0 +#endif + EXTERN pxCurrentTCB EXTERN vTaskSwitchContext EXTERN vPortSVCHandler_C +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + EXTERN vSystemCallEnter + EXTERN vSystemCallEnter_1 + EXTERN vSystemCallExit +#endif PUBLIC xIsPrivileged PUBLIC vResetPrivilege @@ -79,48 +88,79 @@ vResetPrivilege: THUMB /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +vRestoreContextOfFirstTask: + program_mpu_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. 
*/ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context_first_task: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs_first_task: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs_first_task: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + + restore_context_done_first_task: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 + msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ + bx lr + +#else /* configENABLE_MPU */ + vRestoreContextOfFirstTask: ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* Read top of stack from TCB - The first item in pxCurrentTCB is the task top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldm r0!, {r1-r3} /* Read from stack - r1 = PSPLIM, r2 = CONTROL and r3 = EXC_RETURN. */ - msr psplim, r1 /* Set this task's PSPLIM value. */ - msr control, r2 /* Set this task's CONTROL value. */ - adds r0, #32 /* Discard everything up to r0. */ - msr psp, r0 /* This is now the new top of stack to use in the task. */ - isb - mov r0, #0 - msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. */ - bx r3 /* Finally, branch to EXC_RETURN. */ -#else /* configENABLE_MPU */ ldm r0!, {r1-r2} /* Read from stack - r1 = PSPLIM and r2 = EXC_RETURN. */ msr psplim, r1 /* Set this task's PSPLIM value. */ movs r1, #2 /* r1 = 2. */ @@ -131,6 +171,7 @@ vRestoreContextOfFirstTask: mov r0, #0 msr basepri, r0 /* Ensure that interrupts are enabled when the first task starts. 
*/ bx r2 /* Finally, branch to EXC_RETURN. */ + #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ @@ -169,6 +210,114 @@ vClearInterruptMask: bx lr /* Return. */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + +PendSV_Handler: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + ldr r1, [r0] /* r1 = Location in TCB where the context should be saved. */ + mrs r2, psp /* r2 = PSP. */ + + save_general_regs: + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + add r2, r2, #0x20 /* Move r2 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r1!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r2, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. */ + sub r2, r2, #0x20 /* Set r2 back to the location of hardware saved context. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + stmia r1!, {r4-r11} /* Store r4-r11. */ + ldmia r2, {r4-r11} /* Copy the hardware saved context into r4-r11. */ + stmia r1!, {r4-r11} /* Store the hardware saved context. */ + + save_special_regs: + mrs r3, psplim /* r3 = PSPLIM. */ + mrs r4, control /* r4 = CONTROL. */ + stmia r1!, {r2-r4, lr} /* Store original PSP (after hardware has saved context), PSPLIM, CONTROL and LR. */ + str r1, [r0] /* Save the location from where the context should be restored as the first member of TCB. */ + + select_next_task: + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY + msr basepri, r0 /* Disable interrupts upto configMAX_SYSCALL_INTERRUPT_PRIORITY. */ + dsb + isb + bl vTaskSwitchContext + mov r0, #0 /* r0 = 0. */ + msr basepri, r0 /* Enable interrupts. */ + + program_mpu: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. */ + bic r2, #1 /* r2 = r2 & ~1 i.e. Clear the bit 0 in r2. */ + str r2, [r1] /* Disable MPU. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to MAIR0 in TCB. */ + ldr r1, [r0] /* r1 = *r0 i.e. r1 = MAIR0. */ + ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ + str r1, [r2] /* Program MAIR0. */ + + adds r0, #4 /* r0 = r0 + 4. r0 now points to first RBAR in TCB. */ + ldr r1, =0xe000ed98 /* r1 = 0xe000ed98 [Location of RNR]. */ + ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ + + movs r3, #4 /* r3 = 4. */ + str r3, [r1] /* Program RNR = 4. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + + #if ( configTOTAL_MPU_REGIONS == 16 ) + movs r3, #8 /* r3 = 8. */ + str r3, [r1] /* Program RNR = 8. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + movs r3, #12 /* r3 = 12. */ + str r3, [r1] /* Program RNR = 12. */ + ldmia r0!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ + stmia r2, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ + #endif /* configTOTAL_MPU_REGIONS == 16 */ + + ldr r1, =0xe000ed94 /* r1 = 0xe000ed94 [Location of MPU_CTRL]. */ + ldr r2, [r1] /* Read the value of MPU_CTRL. 
*/ + orr r2, #1 /* r2 = r2 | 1 i.e. Set the bit 0 in r2. */ + str r2, [r1] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + restore_context: + ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ + ldr r0, [r2] /* r0 = pxCurrentTCB.*/ + ldr r1, [r0] /* r1 = Location of saved context in TCB. */ + + restore_special_regs: + ldmdb r1!, {r2-r4, lr} /* r2 = original PSP, r3 = PSPLIM, r4 = CONTROL, LR restored. */ + msr psp, r2 + msr psplim, r3 + msr control, r4 + + restore_general_regs: + ldmdb r1!, {r4-r11} /* r4-r11 contain hardware saved context. */ + stmia r2!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r4-r11} /* r4-r11 restored. */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + tst lr, #0x10 + ittt eq + vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r2!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */ + #endif /* configENABLE_FPU || configENABLE_MVE */ + + restore_context_done: + str r1, [r0] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr + +#else /* configENABLE_MPU */ + PendSV_Handler: mrs r0, psp /* Read PSP in r0. */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) @@ -176,16 +325,10 @@ PendSV_Handler: it eq vstmdbeq r0!, {s16-s31} /* Store the additional FP context registers which are not saved automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ -#if ( configENABLE_MPU == 1 ) - mrs r1, psplim /* r1 = PSPLIM. */ - mrs r2, control /* r2 = CONTROL. */ - mov r3, lr /* r3 = LR/EXC_RETURN. */ - stmdb r0!, {r1-r11} /* Store on the stack - PSPLIM, CONTROL, LR and registers that are not automatically saved. */ -#else /* configENABLE_MPU */ + mrs r2, psplim /* r2 = PSPLIM. */ mov r3, lr /* r3 = LR/EXC_RETURN. */ stmdb r0!, {r2-r11} /* Store on the stack - PSPLIM, LR and registers that are not automatically. */ -#endif /* configENABLE_MPU */ ldr r2, =pxCurrentTCB /* Read the location of pxCurrentTCB i.e. &( pxCurrentTCB ). */ ldr r1, [r2] /* Read pxCurrentTCB. */ @@ -203,37 +346,7 @@ PendSV_Handler: ldr r1, [r2] /* Read pxCurrentTCB. */ ldr r0, [r1] /* The first item in pxCurrentTCB is the task top of stack. r0 now points to the top of stack. */ -#if ( configENABLE_MPU == 1 ) - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - bic r4, r4, #1 /* r4 = r4 & ~1 i.e. Clear the bit 0 in r4. */ - str r4, [r2] /* Disable MPU. */ - - adds r1, #4 /* r1 = r1 + 4. r1 now points to MAIR0 in TCB. */ - ldr r3, [r1] /* r3 = *r1 i.e. r3 = MAIR0. */ - ldr r2, =0xe000edc0 /* r2 = 0xe000edc0 [Location of MAIR0]. */ - str r3, [r2] /* Program MAIR0. */ - ldr r2, =0xe000ed98 /* r2 = 0xe000ed98 [Location of RNR]. */ - movs r3, #4 /* r3 = 4. */ - str r3, [r2] /* Program RNR = 4. */ - adds r1, #4 /* r1 = r1 + 4. r1 now points to first RBAR in TCB. */ - ldr r2, =0xe000ed9c /* r2 = 0xe000ed9c [Location of RBAR]. */ - ldmia r1!, {r4-r11} /* Read 4 sets of RBAR/RLAR registers from TCB. */ - stmia r2!, {r4-r11} /* Write 4 set of RBAR/RLAR registers using alias registers. */ - - ldr r2, =0xe000ed94 /* r2 = 0xe000ed94 [Location of MPU_CTRL]. */ - ldr r4, [r2] /* Read the value of MPU_CTRL. */ - orr r4, r4, #1 /* r4 = r4 | 1 i.e. Set the bit 0 in r4. */ - str r4, [r2] /* Enable MPU. 
*/ - dsb /* Force memory writes before continuing. */ -#endif /* configENABLE_MPU */ - -#if ( configENABLE_MPU == 1 ) - ldmia r0!, {r1-r11} /* Read from stack - r1 = PSPLIM, r2 = CONTROL, r3 = LR and r4-r11 restored. */ -#else /* configENABLE_MPU */ ldmia r0!, {r2-r11} /* Read from stack - r2 = PSPLIM, r3 = LR and r4-r11 restored. */ -#endif /* configENABLE_MPU */ #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) tst r3, #0x10 /* Test Bit[4] in LR. Bit[4] of EXC_RETURN is 0 if the Extended Stack Frame is in use. */ @@ -241,22 +354,53 @@ PendSV_Handler: vldmiaeq r0!, {s16-s31} /* Restore the additional FP context registers which are not restored automatically. */ #endif /* configENABLE_FPU || configENABLE_MVE */ - #if ( configENABLE_MPU == 1 ) - msr psplim, r1 /* Restore the PSPLIM register value for the task. */ - msr control, r2 /* Restore the CONTROL register value for the task. */ -#else /* configENABLE_MPU */ msr psplim, r2 /* Restore the PSPLIM register value for the task. */ -#endif /* configENABLE_MPU */ msr psp, r0 /* Remember the new top of stack for the task. */ bx r3 + +#endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) + +SVC_Handler: + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #4 /* portSVC_SYSTEM_CALL_ENTER. */ + beq syscall_enter + cmp r2, #5 /* portSVC_SYSTEM_CALL_ENTER_1. */ + beq syscall_enter_1 + cmp r2, #6 /* portSVC_SYSTEM_CALL_EXIT. */ + beq syscall_exit + b vPortSVCHandler_C + + syscall_enter: + mov r1, lr + b vSystemCallEnter + + syscall_enter_1: + mov r1, lr + b vSystemCallEnter_1 + + syscall_exit: + mov r1, lr + b vSystemCallExit + +#else /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + SVC_Handler: tst lr, #4 ite eq mrseq r0, msp mrsne r0, psp b vPortSVCHandler_C + +#endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ END diff --git a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h index c2ca5fa7730..65ac109c802 100644 --- a/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h +++ b/portable/IAR/ARM_CM85_NTZ/non_secure/portmacrocommon.h @@ -186,23 +186,120 @@ extern void vClearInterruptMask( uint32_t ulMask ) /* __attribute__(( naked )) P #define portMPU_REGION_EXECUTE_NEVER ( 1UL ) /*-----------------------------------------------------------*/ -/** - * @brief Settings to define an MPU region. - */ -typedef struct MPURegionSettings -{ - uint32_t ulRBAR; /**< RBAR for the region. */ - uint32_t ulRLAR; /**< RLAR for the region. */ -} MPURegionSettings_t; +#if ( configENABLE_MPU == 1 ) -/** - * @brief MPU settings as stored in the TCB. - */ -typedef struct MPU_SETTINGS -{ - uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ - MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. */ -} xMPU_SETTINGS; + /** + * @brief Settings to define an MPU region. + */ + typedef struct MPURegionSettings + { + uint32_t ulRBAR; /**< RBAR for the region. */ + uint32_t ulRLAR; /**< RLAR for the region. 
*/ + } MPURegionSettings_t; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + /** + * @brief System call stack. + */ + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulSystemCallStackLimit; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + uint32_t ulStackLimitRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + + #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ + + /** + * @brief MPU settings as stored in the TCB. + */ + #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | | | PC, xPSR | CONTROL, EXC_RETURN | | + * +-----------+---------------+----------+-----------------+------------------------------+-----+ + * + * <-----------><--------------><---------><----------------><-----------------------------><----> + * 16 16 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 54 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * | s16-s31 | s0-s15, FPSCR | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | | | PC, xPSR | EXC_RETURN | | + * +-----------+---------------+----------+-----------------+----------------------+-----+ + * + * <-----------><--------------><---------><----------------><---------------------><----> + * 16 16 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 53 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #else /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + #if( configENABLE_TRUSTZONE == 1 ) + + /* + * +----------+-----------------+------------------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | xSecureContext, PSP, PSPLIM, | | + * | | PC, xPSR | CONTROL, EXC_RETURN | | + * +----------+-----------------+------------------------------+-----+ + * + * <---------><----------------><------------------------------><----> + * 8 8 5 1 + */ + #define MAX_CONTEXT_SIZE 22 + + #else /* #if( configENABLE_TRUSTZONE == 1 ) */ + + /* + * +----------+-----------------+----------------------+-----+ + * | r4-r11 | r0-r3, r12, LR, | PSP, PSPLIM, CONTROL | | + * | | PC, xPSR | EXC_RETURN | | + * +----------+-----------------+----------------------+-----+ + * + * <---------><----------------><----------------------><----> + * 8 8 4 1 + */ + #define MAX_CONTEXT_SIZE 21 + + #endif /* #if( configENABLE_TRUSTZONE == 1 ) */ + + #endif /* #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */ + + /* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ + #define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) + #define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + + typedef struct MPU_SETTINGS + { + uint32_t ulMAIR0; /**< MAIR0 for the task containing attributes for all the 4 per task regions. */ + MPURegionSettings_t xRegionsSettings[ portTOTAL_NUM_REGIONS ]; /**< Settings for 4 per task regions. 
*/ + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif + } xMPU_SETTINGS; + +#endif /* configENABLE_MPU == 1 */ /*-----------------------------------------------------------*/ /** @@ -223,6 +320,9 @@ typedef struct MPU_SETTINGS #define portSVC_FREE_SECURE_CONTEXT 1 #define portSVC_START_SCHEDULER 2 #define portSVC_RAISE_PRIVILEGE 3 +#define portSVC_SYSTEM_CALL_ENTER 4 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 5 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 6 /*-----------------------------------------------------------*/ /** @@ -315,6 +415,20 @@ typedef struct MPU_SETTINGS #endif /* configENABLE_MPU */ /*-----------------------------------------------------------*/ +#if ( configENABLE_MPU == 1 ) + + extern BaseType_t xPortIsTaskPrivileged( void ); + + /** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ + #define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() + +#endif /* configENABLE_MPU == 1 */ +/*-----------------------------------------------------------*/ + /** * @brief Barriers. */ diff --git a/portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c b/portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c new file mode 100644 index 00000000000..aa1e825fc1d --- /dev/null +++ b/portable/RVDS/ARM_CM4_MPU/mpu_wrappers_v2_asm.c @@ -0,0 +1,1993 @@ +/* + * FreeRTOS Kernel + * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* Scheduler includes. 
*/ +#include "FreeRTOS.h" +#include "task.h" +#include "queue.h" +#include "timers.h" +#include "event_groups.h" +#include "stream_buffer.h" + +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + +BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskDelayUntilImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskDelayUntil_Unpriv +MPU_xTaskDelayUntil_Priv + pop {r0} + b MPU_xTaskDelayUntilImpl +MPU_xTaskDelayUntil_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskDelayUntilImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_xTaskDelayUntil == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + +BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTaskAbortDelay( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskAbortDelayImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskAbortDelay_Unpriv +MPU_xTaskAbortDelay_Priv + pop {r0} + b MPU_xTaskAbortDelayImpl +MPU_xTaskAbortDelay_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskAbortDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_xTaskAbortDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + +void MPU_vTaskDelay( const TickType_t xTicksToDelay ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTaskDelay( const TickType_t xTicksToDelay ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTaskDelayImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskDelay_Unpriv +MPU_vTaskDelay_Priv + pop {r0} + b MPU_vTaskDelayImpl +MPU_vTaskDelay_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskDelayImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_vTaskDelay == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskPriorityGet == 1 ) + +UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxTaskPriorityGet( const TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxTaskPriorityGetImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskPriorityGet_Unpriv +MPU_uxTaskPriorityGet_Priv + pop {r0} + b MPU_uxTaskPriorityGetImpl +MPU_uxTaskPriorityGet_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskPriorityGetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_eTaskGetState == 1 ) + +eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm eTaskState MPU_eTaskGetState( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_eTaskGetStateImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_eTaskGetState_Unpriv +MPU_eTaskGetState_Priv + pop {r0} + b MPU_eTaskGetStateImpl +MPU_eTaskGetState_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_eTaskGetStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr 
+} + +#endif /* if ( INCLUDE_eTaskGetState == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTaskGetInfoImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskGetInfo_Unpriv +MPU_vTaskGetInfo_Priv + pop {r0} + b MPU_vTaskGetInfoImpl +MPU_vTaskGetInfo_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskGetInfoImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + +TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) FREERTOS_SYSTEM_CALL; + +__asm TaskHandle_t MPU_xTaskGetIdleTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGetIdleTaskHandleImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetIdleTaskHandle_Unpriv +MPU_xTaskGetIdleTaskHandle_Priv + pop {r0} + b MPU_xTaskGetIdleTaskHandleImpl +MPU_xTaskGetIdleTaskHandle_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetIdleTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTaskSuspend( TaskHandle_t xTaskToSuspend ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTaskSuspendImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSuspend_Unpriv +MPU_vTaskSuspend_Priv + pop {r0} + b MPU_vTaskSuspendImpl +MPU_vTaskSuspend_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSuspendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + +void MPU_vTaskResume( TaskHandle_t xTaskToResume ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTaskResume( TaskHandle_t xTaskToResume ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTaskResumeImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskResume_Unpriv +MPU_vTaskResume_Priv + pop {r0} + b MPU_vTaskResumeImpl +MPU_vTaskResume_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskResumeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_vTaskSuspend == 1 ) */ +/*-----------------------------------------------------------*/ + +TickType_t MPU_xTaskGetTickCount( void ) FREERTOS_SYSTEM_CALL; + +__asm TickType_t MPU_xTaskGetTickCount( void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGetTickCountImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetTickCount_Unpriv +MPU_xTaskGetTickCount_Priv + pop {r0} + b MPU_xTaskGetTickCountImpl +MPU_xTaskGetTickCount_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetTickCountImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxTaskGetNumberOfTasks( void ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxTaskGetNumberOfTasks( 
void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxTaskGetNumberOfTasksImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetNumberOfTasks_Unpriv +MPU_uxTaskGetNumberOfTasks_Priv + pop {r0} + b MPU_uxTaskGetNumberOfTasksImpl +MPU_uxTaskGetNumberOfTasks_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetNumberOfTasksImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) FREERTOS_SYSTEM_CALL; + +__asm char * MPU_pcTaskGetName( TaskHandle_t xTaskToQuery ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_pcTaskGetNameImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTaskGetName_Unpriv +MPU_pcTaskGetName_Priv + pop {r0} + b MPU_pcTaskGetNameImpl +MPU_pcTaskGetName_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTaskGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimeCounter( const TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_ulTaskGetRunTimeCounterImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimeCounter_Unpriv +MPU_ulTaskGetRunTimeCounter_Priv + pop {r0} + b MPU_ulTaskGetRunTimeCounterImpl +MPU_ulTaskGetRunTimeCounter_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetRunTimePercent( const TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_ulTaskGetRunTimePercentImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetRunTimePercent_Unpriv +MPU_ulTaskGetRunTimePercent_Priv + pop {r0} + b MPU_ulTaskGetRunTimePercentImpl +MPU_ulTaskGetRunTimePercent_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) FREERTOS_SYSTEM_CALL; + +__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimePercent( void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_ulTaskGetIdleRunTimePercentImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimePercent_Unpriv +MPU_ulTaskGetIdleRunTimePercent_Priv + pop {r0} + b MPU_ulTaskGetIdleRunTimePercentImpl +MPU_ulTaskGetIdleRunTimePercent_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimePercentImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 
) ) + +configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) FREERTOS_SYSTEM_CALL; + +__asm configRUN_TIME_COUNTER_TYPE MPU_ulTaskGetIdleRunTimeCounter( void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_ulTaskGetIdleRunTimeCounterImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGetIdleRunTimeCounter_Unpriv +MPU_ulTaskGetIdleRunTimeCounter_Priv + pop {r0} + b MPU_ulTaskGetIdleRunTimeCounterImpl +MPU_ulTaskGetIdleRunTimeCounter_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGetIdleRunTimeCounterImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTaskSetApplicationTaskTagImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetApplicationTaskTag_Unpriv +MPU_vTaskSetApplicationTaskTag_Priv + pop {r0} + b MPU_vTaskSetApplicationTaskTagImpl +MPU_vTaskSetApplicationTaskTag_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + +TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm TaskHookFunction_t MPU_xTaskGetApplicationTaskTag( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGetApplicationTaskTagImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetApplicationTaskTag_Unpriv +MPU_xTaskGetApplicationTaskTag_Priv + pop {r0} + b MPU_xTaskGetApplicationTaskTagImpl +MPU_xTaskGetApplicationTaskTag_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetApplicationTaskTagImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_APPLICATION_TASK_TAG == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTaskSetThreadLocalStoragePointerImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetThreadLocalStoragePointer_Unpriv +MPU_vTaskSetThreadLocalStoragePointer_Priv + pop {r0} + b MPU_vTaskSetThreadLocalStoragePointerImpl +MPU_vTaskSetThreadLocalStoragePointer_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + +void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) FREERTOS_SYSTEM_CALL; + +__asm void * MPU_pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + 
BaseType_t xIndex ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_pvTaskGetThreadLocalStoragePointerImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTaskGetThreadLocalStoragePointer_Unpriv +MPU_pvTaskGetThreadLocalStoragePointer_Priv + pop {r0} + b MPU_pvTaskGetThreadLocalStoragePointerImpl +MPU_pvTaskGetThreadLocalStoragePointer_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTaskGetThreadLocalStoragePointerImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxTaskGetSystemStateImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetSystemState_Unpriv +MPU_uxTaskGetSystemState_Priv + pop {r0} + b MPU_uxTaskGetSystemStateImpl +MPU_uxTaskGetSystemState_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetSystemStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TRACE_FACILITY == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + +UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxTaskGetStackHighWaterMarkImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark_Unpriv +MPU_uxTaskGetStackHighWaterMark_Priv + pop {r0} + b MPU_uxTaskGetStackHighWaterMarkImpl +MPU_uxTaskGetStackHighWaterMark_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMarkImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) FREERTOS_SYSTEM_CALL; + +__asm configSTACK_DEPTH_TYPE MPU_uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxTaskGetStackHighWaterMark2Impl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTaskGetStackHighWaterMark2_Unpriv +MPU_uxTaskGetStackHighWaterMark2_Priv + pop {r0} + b MPU_uxTaskGetStackHighWaterMark2Impl +MPU_uxTaskGetStackHighWaterMark2_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTaskGetStackHighWaterMark2Impl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + +TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) FREERTOS_SYSTEM_CALL; + +__asm TaskHandle_t MPU_xTaskGetCurrentTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGetCurrentTaskHandleImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetCurrentTaskHandle_Unpriv 
+MPU_xTaskGetCurrentTaskHandle_Priv + pop {r0} + b MPU_xTaskGetCurrentTaskHandleImpl +MPU_xTaskGetCurrentTaskHandle_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetCurrentTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetSchedulerState == 1 ) + +BaseType_t MPU_xTaskGetSchedulerState( void ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTaskGetSchedulerState( void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGetSchedulerStateImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGetSchedulerState_Unpriv +MPU_xTaskGetSchedulerState_Priv + pop {r0} + b MPU_xTaskGetSchedulerStateImpl +MPU_xTaskGetSchedulerState_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGetSchedulerStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( INCLUDE_xTaskGetSchedulerState == 1 ) */ +/*-----------------------------------------------------------*/ + +void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTaskSetTimeOutStateImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTaskSetTimeOutState_Unpriv +MPU_vTaskSetTimeOutState_Priv + pop {r0} + b MPU_vTaskSetTimeOutStateImpl +MPU_vTaskSetTimeOutState_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTaskSetTimeOutStateImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskCheckForTimeOutImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskCheckForTimeOut_Unpriv +MPU_xTaskCheckForTimeOut_Priv + pop {r0} + b MPU_xTaskCheckForTimeOutImpl +MPU_xTaskCheckForTimeOut_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskCheckForTimeOutImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGenericNotifyImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotify_Unpriv +MPU_xTaskGenericNotify_Priv + pop {r0} + b MPU_xTaskGenericNotifyImpl +MPU_xTaskGenericNotify_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + 
uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGenericNotifyWaitImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyWait_Unpriv +MPU_xTaskGenericNotifyWait_Priv + pop {r0} + b MPU_xTaskGenericNotifyWaitImpl +MPU_xTaskGenericNotifyWait_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTaskGenericNotifyWaitImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm uint32_t MPU_ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_ulTaskGenericNotifyTakeImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyTake_Unpriv +MPU_ulTaskGenericNotifyTake_Priv + pop {r0} + b MPU_ulTaskGenericNotifyTakeImpl +MPU_ulTaskGenericNotifyTake_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTaskGenericNotifyStateClearImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTaskGenericNotifyStateClear_Unpriv +MPU_xTaskGenericNotifyStateClear_Priv + pop {r0} + b MPU_xTaskGenericNotifyStateClearImpl +MPU_xTaskGenericNotifyStateClear_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTaskGenericNotifyStateClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + +uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) FREERTOS_SYSTEM_CALL; + +__asm uint32_t MPU_ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_ulTaskGenericNotifyValueClearImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_ulTaskGenericNotifyValueClear_Unpriv +MPU_ulTaskGenericNotifyValueClear_Priv + pop {r0} + b MPU_ulTaskGenericNotifyValueClearImpl +MPU_ulTaskGenericNotifyValueClear_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_ulTaskGenericNotifyValueClearImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t 
xCopyPosition ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xQueueGenericSend( QueueHandle_t xQueue, + const void * const pvItemToQueue, + TickType_t xTicksToWait, + const BaseType_t xCopyPosition ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueGenericSendImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGenericSend_Unpriv +MPU_xQueueGenericSend_Priv + pop {r0} + b MPU_xQueueGenericSendImpl +MPU_xQueueGenericSend_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGenericSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxQueueMessagesWaiting( const QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxQueueMessagesWaitingImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueMessagesWaiting_Unpriv +MPU_uxQueueMessagesWaiting_Priv + pop {r0} + b MPU_uxQueueMessagesWaitingImpl +MPU_uxQueueMessagesWaiting_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueMessagesWaitingImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxQueueSpacesAvailable( const QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxQueueSpacesAvailableImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxQueueSpacesAvailable_Unpriv +MPU_uxQueueSpacesAvailable_Priv + pop {r0} + b MPU_uxQueueSpacesAvailableImpl +MPU_uxQueueSpacesAvailable_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxQueueSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xQueueReceive( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueReceiveImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueReceive_Unpriv +MPU_xQueueReceive_Priv + pop {r0} + b MPU_xQueueReceiveImpl +MPU_xQueueReceive_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xQueuePeek( QueueHandle_t xQueue, + void * const pvBuffer, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueuePeekImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueuePeek_Unpriv +MPU_xQueuePeek_Priv + pop {r0} + b MPU_xQueuePeekImpl +MPU_xQueuePeek_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueuePeekImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xQueueSemaphoreTake( QueueHandle_t xQueue, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueSemaphoreTakeImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne 
MPU_xQueueSemaphoreTake_Unpriv +MPU_xQueueSemaphoreTake_Priv + pop {r0} + b MPU_xQueueSemaphoreTakeImpl +MPU_xQueueSemaphoreTake_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSemaphoreTakeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) + +TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) FREERTOS_SYSTEM_CALL; + +__asm TaskHandle_t MPU_xQueueGetMutexHolder( QueueHandle_t xSemaphore ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueGetMutexHolderImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGetMutexHolder_Unpriv +MPU_xQueueGetMutexHolder_Priv + pop {r0} + b MPU_xQueueGetMutexHolderImpl +MPU_xQueueGetMutexHolder_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGetMutexHolderImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xQueueTakeMutexRecursive( QueueHandle_t xMutex, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueTakeMutexRecursiveImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueTakeMutexRecursive_Unpriv +MPU_xQueueTakeMutexRecursive_Priv + pop {r0} + b MPU_xQueueTakeMutexRecursiveImpl +MPU_xQueueTakeMutexRecursive_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueTakeMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_RECURSIVE_MUTEXES == 1 ) + +BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xQueueGiveMutexRecursive( QueueHandle_t pxMutex ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueGiveMutexRecursiveImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueGiveMutexRecursive_Unpriv +MPU_xQueueGiveMutexRecursive_Priv + pop {r0} + b MPU_xQueueGiveMutexRecursiveImpl +MPU_xQueueGiveMutexRecursive_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueGiveMutexRecursiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_RECURSIVE_MUTEXES == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm QueueSetMemberHandle_t MPU_xQueueSelectFromSet( QueueSetHandle_t xQueueSet, + const TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueSelectFromSetImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueSelectFromSet_Unpriv +MPU_xQueueSelectFromSet_Priv + pop {r0} + b MPU_xQueueSelectFromSetImpl +MPU_xQueueSelectFromSet_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueSelectFromSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_QUEUE_SETS == 1 ) + +BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t 
xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, + QueueSetHandle_t xQueueSet ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xQueueAddToSetImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xQueueAddToSet_Unpriv +MPU_xQueueAddToSet_Priv + pop {r0} + b MPU_xQueueAddToSetImpl +MPU_xQueueAddToSet_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xQueueAddToSetImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_QUEUE_SETS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vQueueAddToRegistry( QueueHandle_t xQueue, + const char * pcName ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vQueueAddToRegistryImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueAddToRegistry_Unpriv +MPU_vQueueAddToRegistry_Priv + pop {r0} + b MPU_vQueueAddToRegistryImpl +MPU_vQueueAddToRegistry_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueAddToRegistryImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vQueueUnregisterQueue( QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vQueueUnregisterQueueImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vQueueUnregisterQueue_Unpriv +MPU_vQueueUnregisterQueue_Priv + pop {r0} + b MPU_vQueueUnregisterQueueImpl +MPU_vQueueUnregisterQueue_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vQueueUnregisterQueueImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configQUEUE_REGISTRY_SIZE > 0 ) + +const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) FREERTOS_SYSTEM_CALL; + +__asm const char * MPU_pcQueueGetName( QueueHandle_t xQueue ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_pcQueueGetNameImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcQueueGetName_Unpriv +MPU_pcQueueGetName_Priv + pop {r0} + b MPU_pcQueueGetNameImpl +MPU_pcQueueGetName_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcQueueGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configQUEUE_REGISTRY_SIZE > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; + +__asm void * MPU_pvTimerGetTimerID( const TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_pvTimerGetTimerIDImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pvTimerGetTimerID_Unpriv +MPU_pvTimerGetTimerID_Priv + pop {r0} + b MPU_pvTimerGetTimerIDImpl +MPU_pvTimerGetTimerID_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pvTimerGetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) FREERTOS_SYSTEM_CALL; + +__asm 
void MPU_vTimerSetTimerID( TimerHandle_t xTimer, + void * pvNewID ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTimerSetTimerIDImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetTimerID_Unpriv +MPU_vTimerSetTimerID_Priv + pop {r0} + b MPU_vTimerSetTimerIDImpl +MPU_vTimerSetTimerID_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetTimerIDImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTimerIsTimerActive( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTimerIsTimerActiveImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerIsTimerActive_Unpriv +MPU_xTimerIsTimerActive_Priv + pop {r0} + b MPU_xTimerIsTimerActiveImpl +MPU_xTimerIsTimerActive_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerIsTimerActiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) FREERTOS_SYSTEM_CALL; + +__asm TaskHandle_t MPU_xTimerGetTimerDaemonTaskHandle( void ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTimerGetTimerDaemonTaskHandleImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetTimerDaemonTaskHandle_Unpriv +MPU_xTimerGetTimerDaemonTaskHandle_Priv + pop {r0} + b MPU_xTimerGetTimerDaemonTaskHandleImpl +MPU_xTimerGetTimerDaemonTaskHandle_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetTimerDaemonTaskHandleImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTimerGenericCommand( TimerHandle_t xTimer, + const BaseType_t xCommandID, + const TickType_t xOptionalValue, + BaseType_t * const pxHigherPriorityTaskWoken, + const TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTimerGenericCommandImpl + + push {r0} + mrs r0, ipsr + cmp r0, #0 + bne MPU_xTimerGenericCommand_Priv + mrs r0, control + tst r0, #1 + beq MPU_xTimerGenericCommand_Priv +MPU_xTimerGenericCommand_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xTimerGenericCommandImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +MPU_xTimerGenericCommand_Priv + pop {r0} + b MPU_xTimerGenericCommandImpl +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; + +__asm const char * MPU_pcTimerGetName( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_pcTimerGetNameImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_pcTimerGetName_Unpriv +MPU_pcTimerGetName_Priv + pop {r0} + b MPU_pcTimerGetNameImpl +MPU_pcTimerGetName_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_pcTimerGetNameImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + 
+#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vTimerSetReloadMode( TimerHandle_t xTimer, + const BaseType_t uxAutoReload ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vTimerSetReloadModeImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vTimerSetReloadMode_Unpriv +MPU_vTimerSetReloadMode_Priv + pop {r0} + b MPU_vTimerSetReloadModeImpl +MPU_vTimerSetReloadMode_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vTimerSetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xTimerGetReloadMode( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTimerGetReloadModeImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetReloadMode_Unpriv +MPU_xTimerGetReloadMode_Priv + pop {r0} + b MPU_xTimerGetReloadModeImpl +MPU_xTimerGetReloadMode_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxTimerGetReloadMode( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxTimerGetReloadModeImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxTimerGetReloadMode_Unpriv +MPU_uxTimerGetReloadMode_Priv + pop {r0} + b MPU_uxTimerGetReloadModeImpl +MPU_uxTimerGetReloadMode_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_uxTimerGetReloadModeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; + +__asm TickType_t MPU_xTimerGetPeriod( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTimerGetPeriodImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetPeriod_Unpriv +MPU_xTimerGetPeriod_Priv + pop {r0} + b MPU_xTimerGetPeriodImpl +MPU_xTimerGetPeriod_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetPeriodImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + +TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) FREERTOS_SYSTEM_CALL; + +__asm TickType_t MPU_xTimerGetExpiryTime( TimerHandle_t xTimer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xTimerGetExpiryTimeImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xTimerGetExpiryTime_Unpriv +MPU_xTimerGetExpiryTime_Priv + pop {r0} + b MPU_xTimerGetExpiryTimeImpl +MPU_xTimerGetExpiryTime_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xTimerGetExpiryTimeImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /* if ( configUSE_TIMERS == 1 ) */ +/*-----------------------------------------------------------*/ + 
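Each of the __asm wrappers in this file follows the same dispatch pattern: read CONTROL, test the nPRIV bit (bit 0), then either tail-branch straight to the corresponding MPU_*Impl routine when the caller is already privileged, or bracket the call between the portSVC_SYSTEM_CALL_ENTER and portSVC_SYSTEM_CALL_EXIT SVCs when it is not. MPU_xTimerGenericCommand additionally checks IPSR so that calls made from interrupt context take the privileged path, and it raises portSVC_SYSTEM_CALL_ENTER_1 because its fifth parameter is passed on the stack. The C sketch below only restates that flow for readability; it is not part of the patch, the helpers ulReadControlRegister(), vRaiseSvc() and MPU_xExampleImpl() are hypothetical stand-ins for the mrs/svc/bl instructions, and the real wrappers presumably stay in assembly so that the register arguments reach the Impl function untouched.

/* Minimal sketch of the dispatch performed by every wrapper above.
 * Illustrative only - not part of the patch. ulReadControlRegister(),
 * vRaiseSvc() and MPU_xExampleImpl() are hypothetical helper names. */
#include <stdint.h>

extern uint32_t MPU_xExampleImpl( uint32_t ulParam );   /* Kernel-side implementation (hypothetical name). */
extern uint32_t ulReadControlRegister( void );          /* Stand-in for "mrs r0, control". */
extern void vRaiseSvc( uint32_t ulSvcNumber );          /* Stand-in for the "svc" instruction. */

#define exampleSVC_SYSTEM_CALL_ENTER    3U              /* Same value given to portSVC_SYSTEM_CALL_ENTER in portmacro.h later in this patch. */
#define exampleSVC_SYSTEM_CALL_EXIT     5U              /* Same value given to portSVC_SYSTEM_CALL_EXIT. */

uint32_t MPU_xExample( uint32_t ulParam )
{
    uint32_t ulReturn;

    if( ( ulReadControlRegister() & 0x1UL ) == 0UL )
    {
        /* nPRIV clear - the caller is already privileged, so call the
         * implementation directly, as the ..._Priv label does above. */
        ulReturn = MPU_xExampleImpl( ulParam );
    }
    else
    {
        /* Unprivileged caller - the ENTER SVC raises privilege and moves
         * execution onto the task's system call stack, the implementation
         * runs, then the EXIT SVC restores the task stack and drops
         * privilege before returning to the caller. */
        vRaiseSvc( exampleSVC_SYSTEM_CALL_ENTER );
        ulReturn = MPU_xExampleImpl( ulParam );
        vRaiseSvc( exampleSVC_SYSTEM_CALL_EXIT );
    }

    return ulReturn;
}
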
+EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm EventBits_t MPU_xEventGroupWaitBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToWaitFor, + const BaseType_t xClearOnExit, + const BaseType_t xWaitForAllBits, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xEventGroupWaitBitsImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupWaitBits_Unpriv +MPU_xEventGroupWaitBits_Priv + pop {r0} + b MPU_xEventGroupWaitBitsImpl +MPU_xEventGroupWaitBits_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER_1 + bl MPU_xEventGroupWaitBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) FREERTOS_SYSTEM_CALL; + +__asm EventBits_t MPU_xEventGroupClearBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToClear ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xEventGroupClearBitsImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupClearBits_Unpriv +MPU_xEventGroupClearBits_Priv + pop {r0} + b MPU_xEventGroupClearBitsImpl +MPU_xEventGroupClearBits_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupClearBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) FREERTOS_SYSTEM_CALL; + +__asm EventBits_t MPU_xEventGroupSetBits( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xEventGroupSetBitsImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSetBits_Unpriv +MPU_xEventGroupSetBits_Priv + pop {r0} + b MPU_xEventGroupSetBitsImpl +MPU_xEventGroupSetBits_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSetBitsImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm EventBits_t MPU_xEventGroupSync( EventGroupHandle_t xEventGroup, + const EventBits_t uxBitsToSet, + const EventBits_t uxBitsToWaitFor, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xEventGroupSyncImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xEventGroupSync_Unpriv +MPU_xEventGroupSync_Priv + pop {r0} + b MPU_xEventGroupSyncImpl +MPU_xEventGroupSync_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xEventGroupSyncImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) FREERTOS_SYSTEM_CALL; + +__asm UBaseType_t MPU_uxEventGroupGetNumber( void * xEventGroup ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_uxEventGroupGetNumberImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_uxEventGroupGetNumber_Unpriv +MPU_uxEventGroupGetNumber_Priv + pop {r0} + b MPU_uxEventGroupGetNumberImpl +MPU_uxEventGroupGetNumber_Unpriv + pop {r0} + svc 
#portSVC_SYSTEM_CALL_ENTER + bl MPU_uxEventGroupGetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + +void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) FREERTOS_SYSTEM_CALL; + +__asm void MPU_vEventGroupSetNumber( void * xEventGroup, + UBaseType_t uxEventGroupNumber ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_vEventGroupSetNumberImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_vEventGroupSetNumber_Unpriv +MPU_vEventGroupSetNumber_Priv + pop {r0} + b MPU_vEventGroupSetNumberImpl +MPU_vEventGroupSetNumber_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_vEventGroupSetNumberImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} + +#endif /*( configUSE_TRACE_FACILITY == 1 )*/ +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm size_t MPU_xStreamBufferSend( StreamBufferHandle_t xStreamBuffer, + const void * pvTxData, + size_t xDataLengthBytes, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferSendImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSend_Unpriv +MPU_xStreamBufferSend_Priv + pop {r0} + b MPU_xStreamBufferSendImpl +MPU_xStreamBufferSend_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSendImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) FREERTOS_SYSTEM_CALL; + +__asm size_t MPU_xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer, + void * pvRxData, + size_t xBufferLengthBytes, + TickType_t xTicksToWait ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferReceiveImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferReceive_Unpriv +MPU_xStreamBufferReceive_Priv + pop {r0} + b MPU_xStreamBufferReceiveImpl +MPU_xStreamBufferReceive_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferReceiveImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferIsFullImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsFull_Unpriv +MPU_xStreamBufferIsFull_Priv + pop {r0} + b MPU_xStreamBufferIsFullImpl +MPU_xStreamBufferIsFull_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsFullImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferIsEmptyImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferIsEmpty_Unpriv +MPU_xStreamBufferIsEmpty_Priv + pop {r0} + b 
MPU_xStreamBufferIsEmptyImpl +MPU_xStreamBufferIsEmpty_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferIsEmptyImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; + +__asm size_t MPU_xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferSpacesAvailableImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSpacesAvailable_Unpriv +MPU_xStreamBufferSpacesAvailable_Priv + pop {r0} + b MPU_xStreamBufferSpacesAvailableImpl +MPU_xStreamBufferSpacesAvailable_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSpacesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; + +__asm size_t MPU_xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferBytesAvailableImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferBytesAvailable_Unpriv +MPU_xStreamBufferBytesAvailable_Priv + pop {r0} + b MPU_xStreamBufferBytesAvailableImpl +MPU_xStreamBufferBytesAvailable_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferBytesAvailableImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) FREERTOS_SYSTEM_CALL; + +__asm BaseType_t MPU_xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer, + size_t xTriggerLevel ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferSetTriggerLevelImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferSetTriggerLevel_Unpriv +MPU_xStreamBufferSetTriggerLevel_Priv + pop {r0} + b MPU_xStreamBufferSetTriggerLevelImpl +MPU_xStreamBufferSetTriggerLevel_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferSetTriggerLevelImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) FREERTOS_SYSTEM_CALL; + +__asm size_t MPU_xStreamBufferNextMessageLengthBytes( StreamBufferHandle_t xStreamBuffer ) /* FREERTOS_SYSTEM_CALL */ +{ + PRESERVE8 + extern MPU_xStreamBufferNextMessageLengthBytesImpl + + push {r0} + mrs r0, control + tst r0, #1 + bne MPU_xStreamBufferNextMessageLengthBytes_Unpriv +MPU_xStreamBufferNextMessageLengthBytes_Priv + pop {r0} + b MPU_xStreamBufferNextMessageLengthBytesImpl +MPU_xStreamBufferNextMessageLengthBytes_Unpriv + pop {r0} + svc #portSVC_SYSTEM_CALL_ENTER + bl MPU_xStreamBufferNextMessageLengthBytesImpl + svc #portSVC_SYSTEM_CALL_EXIT + bx lr +} +/*-----------------------------------------------------------*/ + +#endif /* configUSE_MPU_WRAPPERS_V1 == 0 */ diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c index ac35afb7b08..beb52ea22ed 100755 --- a/portable/RVDS/ARM_CM4_MPU/port.c +++ b/portable/RVDS/ARM_CM4_MPU/port.c @@ -108,13 +108,34 @@ #define portPRIORITY_GROUP_MASK ( 0x07UL << 8UL ) #define portPRIGROUP_SHIFT ( 8UL ) +/* Constants used during system call enter and exit. 
*/ +#define portPSR_STACK_PADDING_MASK ( 1UL << 9UL ) +#define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL ) + /* Offsets in the stack to the parameters when inside the SVC handler. */ +#define portOFFSET_TO_LR ( 5 ) #define portOFFSET_TO_PC ( 6 ) +#define portOFFSET_TO_PSR ( 7 ) /* For strict compliance with the Cortex-M spec the task start address should * have bit-0 clear, as it is loaded into the PC on exit from an ISR. */ #define portSTART_ADDRESS_MASK ( ( StackType_t ) 0xfffffffeUL ) +/* Does addr lie within [start, end] address range? */ +#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \ + ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) ) + +/* Is the access request satisfied by the available permissions? */ +#define portIS_AUTHORIZED( accessRequest, permissions ) \ + ( ( ( permissions ) & ( accessRequest ) ) == accessRequest ) + +/* Max value that fits in a uint32_t type. */ +#define portUINT32_MAX ( ~( ( uint32_t ) 0 ) ) + +/* Check if adding a and b will result in overflow. */ +#define portADD_UINT32_WILL_OVERFLOW( a, b ) ( ( a ) > ( portUINT32_MAX - ( b ) ) ) +/*-----------------------------------------------------------*/ + /* Each task maintains its own interrupt status in the critical nesting * variable. Note this is not saved as part of the task context as context * switches can only occur when uxCriticalNesting is zero. */ @@ -158,7 +179,7 @@ static void prvRestoreContextOfFirstTask( void ) PRIVILEGED_FUNCTION; * C portion of the SVC handler. The SVC handler is split between an asm entry * and a C wrapper for simplicity of coding and maintenance. */ -void prvSVCHandler( uint32_t * pulRegisters ) __attribute__( ( used ) ) PRIVILEGED_FUNCTION; +void vSVCHandler_C( uint32_t * pulRegisters ) __attribute__( ( used ) ) PRIVILEGED_FUNCTION; /* * Function to enable the VFP. @@ -215,6 +236,61 @@ void vResetPrivilege( void ); #else void vPortExitCritical( void ) PRIVILEGED_FUNCTION; #endif + +/** + * @brief Triggers lazy stacking of FPU registers. + */ +static void prvTriggerLazyStacking( void ) PRIVILEGED_FUNCTION; + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with up to 4 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the system call stack so that upon returning from + * SVC, the system call stack is used. + * + * It is used for the system calls with 5 parameters. + * + * @param pulTaskStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. + */ + void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + /** + * @brief Sets up the task stack so that upon returning from + * SVC, the task stack is used again. + * + * @param pulSystemCallStack The current SP when the SVC was raised. + * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler. 
+ */ + void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) PRIVILEGED_FUNCTION; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ +BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION; /*-----------------------------------------------------------*/ /* @@ -223,43 +299,59 @@ void vResetPrivilege( void ); StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, TaskFunction_t pxCode, void * pvParameters, - BaseType_t xRunPrivileged ) + BaseType_t xRunPrivileged, + xMPU_SETTINGS * xMPUSettings ) { - /* Simulate the stack frame as it would be created by a context switch - * interrupt. */ - pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */ - *pxTopOfStack = portINITIAL_XPSR; /* xPSR */ - pxTopOfStack--; - *pxTopOfStack = ( ( StackType_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC */ - pxTopOfStack--; - *pxTopOfStack = 0; /* LR */ - pxTopOfStack -= 5; /* R12, R3, R2 and R1. */ - *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */ - - /* A save method is being used that requires each task to maintain its - * own exec return value. */ - pxTopOfStack--; - *pxTopOfStack = portINITIAL_EXC_RETURN; - - pxTopOfStack -= 9; /* R11, R10, R9, R8, R7, R6, R5 and R4. */ - if( xRunPrivileged == pdTRUE ) { - *pxTopOfStack = portINITIAL_CONTROL_IF_PRIVILEGED; + xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG; + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_PRIVILEGED; } else { - *pxTopOfStack = portINITIAL_CONTROL_IF_UNPRIVILEGED; + xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG ); + xMPUSettings->ulContext[ 0 ] = portINITIAL_CONTROL_IF_UNPRIVILEGED; + } + xMPUSettings->ulContext[ 1 ] = 0x04040404; /* r4. */ + xMPUSettings->ulContext[ 2 ] = 0x05050505; /* r5. */ + xMPUSettings->ulContext[ 3 ] = 0x06060606; /* r6. */ + xMPUSettings->ulContext[ 4 ] = 0x07070707; /* r7. */ + xMPUSettings->ulContext[ 5 ] = 0x08080808; /* r8. */ + xMPUSettings->ulContext[ 6 ] = 0x09090909; /* r9. */ + xMPUSettings->ulContext[ 7 ] = 0x10101010; /* r10. */ + xMPUSettings->ulContext[ 8 ] = 0x11111111; /* r11. */ + xMPUSettings->ulContext[ 9 ] = portINITIAL_EXC_RETURN; /* EXC_RETURN. */ + + xMPUSettings->ulContext[ 10 ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */ + xMPUSettings->ulContext[ 11 ] = ( uint32_t ) pvParameters; /* r0. */ + xMPUSettings->ulContext[ 12 ] = 0x01010101; /* r1. */ + xMPUSettings->ulContext[ 13 ] = 0x02020202; /* r2. */ + xMPUSettings->ulContext[ 14 ] = 0x03030303; /* r3. */ + xMPUSettings->ulContext[ 15 ] = 0x12121212; /* r12. */ + xMPUSettings->ulContext[ 16 ] = 0; /* LR. */ + xMPUSettings->ulContext[ 17 ] = ( ( uint32_t ) pxCode ) & portSTART_ADDRESS_MASK; /* PC. */ + xMPUSettings->ulContext[ 18 ] = portINITIAL_XPSR; /* xPSR. */ + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + { + /* Ensure that the system call stack is double word aligned. */ + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] ); + xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) & + ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) ); + + /* This is not NULL only for the duration of a system call. 
*/ + xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL; } + #endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ - return pxTopOfStack; + return &( xMPUSettings->ulContext[ 19 ] ); } /*-----------------------------------------------------------*/ -void prvSVCHandler( uint32_t * pulParam ) +void vSVCHandler_C( uint32_t * pulParam ) { uint8_t ucSVCNumber; - uint32_t ulReg, ulPC; + uint32_t ulPC, ulReg; #if ( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) extern uint32_t __syscalls_flash_start__; @@ -300,11 +392,11 @@ void prvSVCHandler( uint32_t * pulParam ) { __asm { -/* *INDENT-OFF* */ + /* *INDENT-OFF* */ mrs ulReg, control /* Obtain current control value. */ bic ulReg, # 1 /* Set privilege bit. */ msr control, ulReg /* Write back new control value. */ -/* *INDENT-ON* */ + /* *INDENT-ON* */ } } @@ -313,14 +405,14 @@ void prvSVCHandler( uint32_t * pulParam ) case portSVC_RAISE_PRIVILEGE: __asm { -/* *INDENT-OFF* */ + /* *INDENT-OFF* */ mrs ulReg, control /* Obtain current control value. */ bic ulReg, # 1 /* Set privilege bit. */ msr control, ulReg /* Write back new control value. */ -/* *INDENT-ON* */ + /* *INDENT-ON* */ } break; - #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ + #endif /* #if( configENFORCE_SYSTEM_CALLS_FROM_KERNEL_ONLY == 1 ) */ default: /* Unknown SVC call. */ break; @@ -328,9 +420,339 @@ void prvSVCHandler( uint32_t * pulParam ) } /*-----------------------------------------------------------*/ +__asm void prvTriggerLazyStacking( void ) /* PRIVILEGED_FUNCTION */ +{ +/* *INDENT-OFF* */ + PRESERVE8 + + vpush {s0} /* Trigger lazy stacking. */ + vpop {s0} /* Nullify the affect of the above instruction. */ + +/* *INDENT-ON* */ +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i, r1; + extern uint32_t __syscalls_flash_start__; + extern uint32_t __syscalls_flash_end__; + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + prvTriggerLazyStacking(); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the system call stack for the stack frame. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Use the pulSystemCallStack in thread mode. */ + __asm + { + msr psp, pulSystemCallStack + }; + + /* Raise the privilege for the duration of the system call. */ + __asm + { + mrs r1, control /* Obtain current control value. */ + bic r1, #1 /* Clear nPRIV bit. */ + msr control, r1 /* Write back new control value. 
*/ + }; + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallEnter_1( uint32_t * pulTaskStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulSystemCallStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i, r1; + extern uint32_t __syscalls_flash_start__; + extern uint32_t __syscalls_flash_end__; + + ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack; + + /* This is not NULL only for the duration of the system call. */ + configASSERT( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ); + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + prvTriggerLazyStacking(); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the system call stack for the stack frame and + * the parameter passed on the stack. We only need to copy one + * parameter but we still reserve 2 spaces to keep the stack + * double word aligned. */ + pulSystemCallStack = pulSystemCallStack - ulStackFrameSize - 2UL; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulSystemCallStack[ i ] = pulTaskStack[ i ]; + } + + /* Copy the parameter which is passed the stack. */ + if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK ) + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize + 1 ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG; + } + else + { + pulSystemCallStack[ ulStackFrameSize ] = pulTaskStack[ ulStackFrameSize ]; + /* Record if the hardware used padding to force the stack pointer + * to be double word aligned. */ + pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG ); + } + + /* Use the pulSystemCallStack in thread mode. 
*/ + __asm + { + msr psp, pulSystemCallStack + }; + + /* Raise the privilege for the duration of the system call. */ + __asm + { + mrs r1, control /* Obtain current control value. */ + bic r1, #1 /* Clear nPRIV bit. */ + msr control, r1 /* Write back new control value. */ + }; + + /* Remember the location where we should copy the stack frame when we exit from + * the system call. */ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize; + + /* Store the value of the Link Register before the SVC was raised. We need to + * restore it when we exit from the system call. */ + pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ]; + + /* We ensure in pxPortInitialiseStack that the system call stack is + * double word aligned and therefore, there is no need of padding. + * Clear the bit[9] of stacked xPSR. */ + pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + +void vSystemCallExit( uint32_t * pulSystemCallStack, uint32_t ulLR ) /* PRIVILEGED_FUNCTION */ +{ + extern TaskHandle_t pxCurrentTCB; + xMPU_SETTINGS * pxMpuSettings; + uint32_t * pulTaskStack; + uint32_t ulStackFrameSize, ulSystemCallLocation, i, r1; + extern uint32_t __syscalls_flash_start__; + extern uint32_t __syscalls_flash_end__; + + ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ]; + + /* If the request did not come from the system call section, do nothing. */ + if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) && + ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) ) + { + pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB ); + pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack; + + if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL ) + { + /* Extended frame i.e. FPU in use. */ + ulStackFrameSize = 26; + prvTriggerLazyStacking(); + } + else + { + /* Standard frame i.e. FPU not in use. */ + ulStackFrameSize = 8; + } + + /* Make space on the task stack for the stack frame. */ + pulTaskStack = pulTaskStack - ulStackFrameSize; + + /* Copy the stack frame. */ + for( i = 0; i < ulStackFrameSize; i++ ) + { + pulTaskStack[ i ] = pulSystemCallStack[ i ]; + } + + /* Use the pulTaskStack in thread mode. */ + __asm + { + msr psp, pulTaskStack + }; + + /* Drop the privilege before returning to the thread mode. */ + __asm + { + mrs r1, control /* Obtain current control value. */ + orr r1, #1 /* Set nPRIV bit. */ + msr control, r1 /* Write back new control value. */ + }; + + /* Restore the stacked link register to what it was at the time of + * system call entry. */ + pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry; + + /* If the hardware used padding to force the stack pointer + * to be double word aligned, set the stacked xPSR bit[9], + * otherwise clear it. */ + if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG ) + { + pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK; + } + else + { + pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK ); + } + + /* This is not NULL only for the duration of the system call. 
*/ + pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL; + } +} + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ +/*-----------------------------------------------------------*/ + +BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */ +{ + BaseType_t xTaskIsPrivileged = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xTaskIsPrivileged = pdTRUE; + } + + return xTaskIsPrivileged; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + __asm void vPortSVCHandler( void ) { - extern prvSVCHandler + extern vSVCHandler_C + extern vSystemCallEnter + extern vSystemCallEnter_1 + extern vSystemCallExit + +/* *INDENT-OFF* */ + PRESERVE8 + + tst lr, #4 + ite eq + mrseq r0, msp + mrsne r0, psp + + ldr r1, [r0, #24] + ldrb r2, [r1, #-2] + cmp r2, #portSVC_SYSTEM_CALL_ENTER + beq syscall_enter + cmp r2, #portSVC_SYSTEM_CALL_ENTER_1 + beq syscall_enter_1 + cmp r2, #portSVC_SYSTEM_CALL_EXIT + beq syscall_exit + b vSVCHandler_C + +syscall_enter + mov r1, lr + b vSystemCallEnter + +syscall_enter_1 + mov r1, lr + b vSystemCallEnter_1 + +syscall_exit + mov r1, lr + b vSystemCallExit +/* *INDENT-ON* */ +} + +#else /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +__asm void vPortSVCHandler( void ) +{ + extern vSVCHandler_C /* *INDENT-OFF* */ PRESERVE8 @@ -345,9 +767,11 @@ __asm void vPortSVCHandler( void ) mrs r0, psp #endif - b prvSVCHandler + b vSVCHandler_C /* *INDENT-ON* */ } + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ /*-----------------------------------------------------------*/ __asm void prvRestoreContextOfFirstTask( void ) @@ -355,45 +779,54 @@ __asm void prvRestoreContextOfFirstTask( void ) /* *INDENT-OFF* */ PRESERVE8 - ldr r0, =0xE000ED08 /* Use the NVIC offset register to locate the stack. */ - ldr r0, [ r0 ] - ldr r0, [ r0 ] - msr msp, r0 /* Set the msp back to the start of the stack. */ - ldr r3, =pxCurrentTCB /* Restore the context. */ - ldr r1, [ r3 ] - ldr r0, [ r1 ] /* The first item in the TCB is the task top of stack. */ - add r1, r1, #4 /* Move onto the second item in the TCB... */ - - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */ - bic r3, r3, # 1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - str r3, [ r2 ] /* Disable MPU. */ - - ldr r2, =0xe000ed9c /* Region Base Address register. */ - ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ - stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ + ldr r0, =0xE000ED08 /* Use the NVIC offset register to locate the stack. */ + ldr r0, [r0] + ldr r0, [r0] + msr msp, r0 /* Set the msp back to the start of the stack. */ + + /*------------ Program MPU. ------------ */ + ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */ + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + bic r3, r3, # 1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + str r3, [r0] /* Disable MPU. */ + + ldr r0, =0xe000ed9c /* Region Base Address register. 
*/ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ #if ( configTOTAL_MPU_REGIONS == 16 ) - ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ - stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ - ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ - stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ #endif /* configTOTAL_MPU_REGIONS == 16. */ - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */ - orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - str r3, [ r2 ] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + str r3, [r0] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + /*---------- Restore Context. ---------- */ + ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */ + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + ldr r1, [r2] /* r1 = Location of saved context in TCB. */ - ldmia r0 !, { r3 - r11, r14 } /* Pop the registers that are not automatically saved on exception entry. */ + ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ + msr psp, r0 + stmia r0, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */ msr control, r3 - msr psp, r0 /* Restore the task stack pointer. */ + str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. */ + mov r0, #0 msr basepri, r0 - bx r14 - nop + bx lr /* *INDENT-ON* */ } /*-----------------------------------------------------------*/ @@ -650,72 +1083,90 @@ __asm void xPortPendSVHandler( void ) /* *INDENT-OFF* */ PRESERVE8 - mrs r0, psp - - ldr r3, =pxCurrentTCB /* Get the location of the current TCB. */ - ldr r2, [ r3 ] - - tst r14, #0x10 /* Is the task using the FPU context? If so, push high vfp registers. */ - it eq - vstmdbeq r0 !, { s16 - s31 } + ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */ + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + ldr r1, [r2] /* r1 = Location where the context should be saved. */ - mrs r1, control - stmdb r0 !, { r1, r4 - r11, r14 } /* Save the remaining registers. */ - str r0, [ r2 ] /* Save the new top of stack into the first member of the TCB. */ + /*------------ Save Context. ----------- */ + mrs r3, control + mrs r0, psp + isb - stmdb sp !, { r0, r3 } - mov r0, # configMAX_SYSCALL_INTERRUPT_PRIORITY - #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) - cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ - #endif + add r0, r0, #0x20 /* Move r0 to location where s0 is saved. */ + tst lr, #0x10 + ittt eq + vstmiaeq r1!, {s16-s31} /* Store s16-s31. */ + vldmiaeq r0, {s0-s16} /* Copy hardware saved FP context into s0-s16. */ + vstmiaeq r1!, {s0-s16} /* Store hardware saved FP context. 
*/ + sub r0, r0, #0x20 /* Set r0 back to the location of hardware saved context. */ + + stmia r1!, {r3-r11, lr} /* Store CONTROL register, r4-r11 and LR. */ + ldmia r0, {r4-r11} /* Copy hardware saved context into r4-r11. */ + stmia r1!, {r0, r4-r11} /* Store original PSP (after hardware has saved context) and the hardware saved context. */ + str r1, [r2] /* Save the location from where the context should be restored as the first member of TCB. */ + + /*---------- Select next task. --------- */ + mov r0, #configMAX_SYSCALL_INTERRUPT_PRIORITY +#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsid i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ +#endif msr basepri, r0 dsb isb - #if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) - cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ - #endif +#if ( configENABLE_ERRATA_837070_WORKAROUND == 1 ) + cpsie i /* ARM Cortex-M7 r0p1 Errata 837070 workaround. */ +#endif bl vTaskSwitchContext mov r0, #0 msr basepri, r0 - ldmia sp !, { r0, r3 } - /* Restore the context. */ - ldr r1, [ r3 ] - ldr r0, [ r1 ] /* The first item in the TCB is the task top of stack. */ - add r1, r1, #4 /* Move onto the second item in the TCB... */ - - dmb /* Complete outstanding transfers before disabling MPU. */ - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */ - bic r3, r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ - str r3, [ r2 ] /* Disable MPU. */ - - ldr r2, =0xe000ed9c /* Region Base Address register. */ - ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ - stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ - #if ( configTOTAL_MPU_REGIONS == 16 ) - ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ - stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ - ldmia r1 !, { r4 - r11 } /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ - stmia r2, { r4 - r11 } /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */ - #endif /* configTOTAL_MPU_REGIONS == 16. */ - - ldr r2, =0xe000ed94 /* MPU_CTRL register. */ - ldr r3, [ r2 ] /* Read the value of MPU_CTRL. */ - orr r3, r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ - str r3, [ r2 ] /* Enable MPU. */ - dsb /* Force memory writes before continuing. */ - - ldmia r0 !, { r3 - r11, r14 } /* Pop the registers that are not automatically saved on exception entry. */ + /*------------ Program MPU. ------------ */ + ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */ + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + add r2, r2, #4 /* r2 = Second item in the TCB which is xMPUSettings. */ + + dmb /* Complete outstanding transfers before disabling MPU. */ + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + bic r3, #1 /* r3 = r3 & ~1 i.e. Clear the bit 0 in r3. */ + str r3, [r0] /* Disable MPU. */ + + ldr r0, =0xe000ed9c /* Region Base Address register. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */ + +#if ( configTOTAL_MPU_REGIONS == 16 ) + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */ + ldmia r2!, {r4-r11} /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */ + stmia r0, {r4-r11} /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. 
*/ +#endif /* configTOTAL_MPU_REGIONS == 16. */ + + ldr r0, =0xe000ed94 /* MPU_CTRL register. */ + ldr r3, [r0] /* Read the value of MPU_CTRL. */ + orr r3, #1 /* r3 = r3 | 1 i.e. Set the bit 0 in r3. */ + str r3, [r0] /* Enable MPU. */ + dsb /* Force memory writes before continuing. */ + + /*---------- Restore Context. ---------- */ + ldr r3, =pxCurrentTCB /* r3 = &( pxCurrentTCB ). */ + ldr r2, [r3] /* r2 = pxCurrentTCB. */ + ldr r1, [r2] /* r1 = Location of saved context in TCB. */ + + ldmdb r1!, {r0, r4-r11} /* r0 contains PSP after the hardware had saved context. r4-r11 contain hardware saved context. */ + msr psp, r0 + stmia r0!, {r4-r11} /* Copy the hardware saved context on the task stack. */ + ldmdb r1!, {r3-r11, lr} /* r3 contains CONTROL register. r4-r11 and LR restored. */ msr control, r3 - tst r14, #0x10 /* Is the task using the FPU context? If so, pop the high vfp registers too. */ - it eq - vldmiaeq r0 !, { s16 - s31 } + tst lr, #0x10 + ittt eq + vldmdbeq r1!, {s0-s16} /* s0-s16 contain hardware saved FP context. */ + vstmiaeq r0!, {s0-s16} /* Copy hardware saved FP context on the task stack. */ + vldmdbeq r1!, {s16-s31} /* Restore s16-s31. */ - msr psp, r0 - bx r14 - nop + str r1, [r2] /* Save the location where the context should be saved next as the first member of TCB. */ + bx lr /* *INDENT-ON* */ } /*-----------------------------------------------------------*/ @@ -934,11 +1385,19 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) | ( portMPU_REGION_ENABLE ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) __SRAM_segment_start__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) __SRAM_segment_end__; + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); + /* Invalidate user configurable regions. 
*/ for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ ) { xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } } else @@ -961,6 +1420,12 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) | ( ( configTEX_S_C_B_SRAM & portMPU_RASR_TEX_S_C_B_MASK ) << portMPU_RASR_TEX_S_C_B_LOCATION ) | ( portMPU_REGION_ENABLE ); + + xMPUSettings->xRegionSettings[ 0 ].ulRegionStartAddress = ( uint32_t ) pxBottomOfStack; + xMPUSettings->xRegionSettings[ 0 ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) ( pxBottomOfStack ) + + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1UL ); + xMPUSettings->xRegionSettings[ 0 ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | + tskMPU_WRITE_PERMISSION ); } lIndex = 0; @@ -981,12 +1446,28 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, ( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) | ( xRegions[ lIndex ].ulParameters ) | ( portMPU_REGION_ENABLE ); + + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = ( uint32_t ) ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1UL ); + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; + if( ( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_ONLY ) == portMPU_REGION_READ_ONLY ) || + ( ( xRegions[lIndex].ulParameters & portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) == portMPU_REGION_PRIVILEGED_READ_WRITE_UNPRIV_READ_ONLY ) ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = tskMPU_READ_PERMISSION; + } + if( ( xRegions[ lIndex ].ulParameters & portMPU_REGION_READ_WRITE ) == portMPU_REGION_READ_WRITE ) + { + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION ); + } } else { /* Invalidate the region. */ xMPUSettings->xRegion[ ul ].ulRegionBaseAddress = ( ( ul - 1UL ) | portMPU_REGION_VALID ); xMPUSettings->xRegion[ ul ].ulRegionAttribute = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionStartAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionEndAddress = 0UL; + xMPUSettings->xRegionSettings[ ul ].ulRegionPermissions = 0UL; } lIndex++; @@ -995,6 +1476,47 @@ void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, } /*-----------------------------------------------------------*/ +BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer, + uint32_t ulBufferLength, + uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */ + +{ + uint32_t i, ulBufferStartAddress, ulBufferEndAddress; + BaseType_t xAccessGranted = pdFALSE; + const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. 
*/ + + if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG ) + { + xAccessGranted = pdTRUE; + } + else + { + if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE ) + { + ulBufferStartAddress = ( uint32_t ) pvBuffer; + ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL ); + + for( i = 0; i < portTOTAL_NUM_REGIONS_IN_TCB; i++ ) + { + if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionStartAddress, + xTaskMpuSettings->xRegionSettings[ i ].ulRegionEndAddress ) && + portIS_AUTHORIZED( ulAccessRequested, xTaskMpuSettings->xRegionSettings[ i ].ulRegionPermissions ) ) + { + xAccessGranted = pdTRUE; + break; + } + } + } + } + + return xAccessGranted; +} +/*-----------------------------------------------------------*/ + __asm uint32_t prvPortGetIPSR( void ) { /* *INDENT-OFF* */ diff --git a/portable/RVDS/ARM_CM4_MPU/portmacro.h b/portable/RVDS/ARM_CM4_MPU/portmacro.h index c7cd5628951..cc4e136d67e 100644 --- a/portable/RVDS/ARM_CM4_MPU/portmacro.h +++ b/portable/RVDS/ARM_CM4_MPU/portmacro.h @@ -193,9 +193,45 @@ typedef struct MPU_REGION_REGISTERS uint32_t ulRegionAttribute; } xMPU_REGION_REGISTERS; +typedef struct MPU_REGION_SETTINGS +{ + uint32_t ulRegionStartAddress; + uint32_t ulRegionEndAddress; + uint32_t ulRegionPermissions; +} xMPU_REGION_SETTINGS; + +#if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + + #ifndef configSYSTEM_CALL_STACK_SIZE + #error configSYSTEM_CALL_STACK_SIZE must be defined to the desired size of the system call stack in words for using MPU wrappers v2. + #endif + + typedef struct SYSTEM_CALL_STACK_INFO + { + uint32_t ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE ]; + uint32_t * pulSystemCallStack; + uint32_t * pulTaskStack; + uint32_t ulLinkRegisterAtSystemCallEntry; + } xSYSTEM_CALL_STACK_INFO; + +#endif /* #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) */ + +#define MAX_CONTEXT_SIZE 52 + +/* Flags used for xMPU_SETTINGS.ulTaskFlags member. */ +#define portSTACK_FRAME_HAS_PADDING_FLAG ( 1UL << 0UL ) +#define portTASK_IS_PRIVILEGED_FLAG ( 1UL << 1UL ) + typedef struct MPU_SETTINGS { xMPU_REGION_REGISTERS xRegion[ portTOTAL_NUM_REGIONS_IN_TCB ]; + xMPU_REGION_SETTINGS xRegionSettings[ portTOTAL_NUM_REGIONS_IN_TCB ]; + uint32_t ulContext[ MAX_CONTEXT_SIZE ]; + uint32_t ulTaskFlags; + + #if ( configUSE_MPU_WRAPPERS_V1 == 0 ) + xSYSTEM_CALL_STACK_INFO xSystemCallStackInfo; + #endif } xMPU_SETTINGS; /* Architecture specifics. */ @@ -209,9 +245,12 @@ typedef struct MPU_SETTINGS /*-----------------------------------------------------------*/ /* SVC numbers for various services. */ -#define portSVC_START_SCHEDULER 0 -#define portSVC_YIELD 1 -#define portSVC_RAISE_PRIVILEGE 2 +#define portSVC_START_SCHEDULER 0 +#define portSVC_YIELD 1 +#define portSVC_RAISE_PRIVILEGE 2 +#define portSVC_SYSTEM_CALL_ENTER 3 /* System calls with upto 4 parameters. */ +#define portSVC_SYSTEM_CALL_ENTER_1 4 /* System calls with 5 parameters. */ +#define portSVC_SYSTEM_CALL_EXIT 5 /* Scheduler utilities. 
*/ @@ -314,6 +353,16 @@ extern void vResetPrivilege( void ); #define portRESET_PRIVILEGE() vResetPrivilege() /*-----------------------------------------------------------*/ +extern BaseType_t xPortIsTaskPrivileged( void ); + +/** + * @brief Checks whether or not the calling task is privileged. + * + * @return pdTRUE if the calling task is privileged, pdFALSE otherwise. + */ +#define portIS_TASK_PRIVILEGED() xPortIsTaskPrivileged() +/*-----------------------------------------------------------*/ + static portFORCE_INLINE void vPortSetBASEPRI( uint32_t ulBASEPRI ) { __asm diff --git a/queue.c b/queue.c index 23e97044dce..29765a54fe1 100644 --- a/queue.c +++ b/queue.c @@ -2194,6 +2194,12 @@ void vQueueDelete( QueueHandle_t xQueue ) #endif /* configUSE_TRACE_FACILITY */ /*-----------------------------------------------------------*/ +UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */ +{ + return ( ( Queue_t * ) xQueue )->uxItemSize; +} +/*-----------------------------------------------------------*/ + #if ( configUSE_MUTEXES == 1 ) static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) diff --git a/tasks.c b/tasks.c index ed3cd1ee2ed..72200ec4f3c 100644 --- a/tasks.c +++ b/tasks.c @@ -975,17 +975,17 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, { #if ( portSTACK_GROWTH < 0 ) { - pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged ); + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) ); } #else /* portSTACK_GROWTH */ { - pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) ); } #endif /* portSTACK_GROWTH */ } #else /* portHAS_STACK_OVERFLOW_CHECKING */ { - pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) ); } #endif /* portHAS_STACK_OVERFLOW_CHECKING */ } @@ -5493,6 +5493,21 @@ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, } #endif /* INCLUDE_vTaskSuspend */ } +/*-----------------------------------------------------------*/ + +#if ( portUSING_MPU_WRAPPERS == 1 ) + + xMPU_SETTINGS * xTaskGetMPUSettings( TaskHandle_t xTask ) + { + TCB_t * pxTCB; + + pxTCB = prvGetTCBFromHandle( xTask ); + + return &( pxTCB->xMPUSettings ); + } + +#endif /* portUSING_MPU_WRAPPERS */ +/*-----------------------------------------------------------*/ /* Code below here allows additional code to be inserted into this source file, * especially where access to file scope functions and data is needed (for example From b60ab2fa14c12c9d169bc7b6467db8ee93bfb441 Mon Sep 17 00:00:00 2001 From: kar-rahul-aws <118818625+kar-rahul-aws@users.noreply.github.com> Date: Thu, 13 Jul 2023 17:13:16 +0530 Subject: [PATCH 58/62] Update History for Version 10.6.0 (#706) Signed-off-by: kar-rahul-aws --- History.txt | 111 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) diff --git a/History.txt b/History.txt index 94b74e80363..1bf7ea045dc 100644 --- a/History.txt +++ b/History.txt @@ -1,5 +1,116 @@ 
Documentation and download available at https://www.FreeRTOS.org/ +Changes between FreeRTOS V10.5.1 and FreeRTOS 10.6.0 released July 13, 2023 + + + Add a new MPU wrapper that places additional restrictions on unprivileged + tasks. The following is the list of changes introduced with the new MPU + wrapper: + + 1. Opaque and indirectly verifiable integers for kernel object handles: + All the kernel object handles (for example, queue handles) are now + opaque integers. Previously object handles were raw pointers. + 2. Save the task context in Task Control Block (TCB): When a task is + swapped out by the scheduler, the task's context is now saved in its + TCB. Previously the task's context was saved on its stack. + 3. Execute system calls on a separate privileged only stack: FreeRTOS + system calls, which execute with elevated privilege, now use a + separate privileged only stack. Previously system calls used the + calling task's stack. The application writer can control the size of + the system call stack using new configSYSTEM_CALL_STACK_SIZE config + macro. + 4. Memory bounds checks: FreeRTOS system calls which accept a pointer + and de-reference it, now verify that the calling task has required + permissions to access the memory location referenced by the pointer. + 5. System calls restrictions: The following system calls are no longer + available to unprivileged tasks: + - vQueueDelete + - xQueueCreateMutex + - xQueueCreateMutexStatic + - xQueueCreateCountingSemaphore + - xQueueCreateCountingSemaphoreStatic + - xQueueGenericCreate + - xQueueGenericCreateStatic + - xQueueCreateSet + - xQueueRemoveFromSet + - xQueueGenericReset + - xTaskCreate + - xTaskCreateStatic + - vTaskDelete + - vTaskPrioritySet + - vTaskSuspendAll + - xTaskResumeAll + - xTaskGetHandle + - xTaskCallApplicationTaskHook + - vTaskList + - vTaskGetRunTimeStats + - xTaskCatchUpTicks + - xEventGroupCreate + - xEventGroupCreateStatic + - vEventGroupDelete + - xStreamBufferGenericCreate + - xStreamBufferGenericCreateStatic + - vStreamBufferDelete + - xStreamBufferReset + Also, an unprivileged task can no longer use vTaskSuspend to suspend + any task other than itself. + + We thank the following people for their inputs in these enhancements: + - David Reiss of Meta Platforms, Inc. + - Lan Luo, Xinhui Shao, Yumeng Wei, Zixia Liu, Huaiyu Yan and Zhen Ling + of School of Computer Science and Engineering, Southeast University, + China. + - Xinwen Fu of Department of Computer Science, University of + Massachusetts Lowell, USA. + - Yuequi Chen, Zicheng Wang, Minghao Lin of University of Colorado + Boulder, USA. + + Add Cortex-M35P port. Contributed by @urutva. + + Add embedded extension (RV32E) support to the IAR RISC-V port. + + Add ulTaskGetRunTimeCounter and ulTaskGetRunTimePercent APIs. Contributed by + @chrisnc. + + Add APIs to get the application supplied buffers from statically + created kernel objects. The following new APIs are added: + - xTaskGetStaticBuffers + - xQueueGetStaticBuffers + - xQueueGenericGetStaticBuffers + - xSemaphoreGetStaticBuffer + - xEventGroupGetStaticBuffer + - xStreamBufferGetStaticBuffers + - xMessageBufferGetStaticBuffers + These APIs enable the application writer to obtain static buffers from + the kernel object and free/reuse them at the time of deletion. Earlier + the application writer had to maintain the association of static buffers + and the kernel object in the application. Contributed by @Dazza0. + + Add Thread Local Storage (TLS) support using picolibc function. 
From 6f6f656aa75c66c2ba377b060f5bb4da3ec1497a Mon Sep 17 00:00:00 2001
From: "Mr. Jake"
Date: Sun, 16 Jul 2023 00:16:31 +0200
Subject: [PATCH 59/62] Fixed compile options polluting project (#694)

* Fixed compile options polluting project

Moved add_library higher

* Apply suggestions from code review

Co-authored-by: Paul Bartell

* fixed cmakelists keeping in mind the suggestions

---------

Co-authored-by: Paul Bartell
Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>
Co-authored-by: Aniruddha Kanhere <60444055+AniruddhaKanhere@users.noreply.github.com>
---
 CMakeLists.txt | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index a68b652cada..c57d464028d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -228,6 +228,9 @@ elseif((FREERTOS_PORT STREQUAL "A_CUSTOM_PORT") AND (NOT TARGET freertos_kernel_
         "    freertos_kernel_include)")
 endif()
 
+
+add_library(freertos_kernel STATIC)
+
 ########################################################################
 # Overall Compile Options
 # Note the compile option strategy is to error on everything and then
@@ -248,7 +251,7 @@ endif()
 #   MSVC      | MSVC       # Note only for MinGW?
 #   Renesas   | ?TBD?
-add_compile_options(
+target_compile_options(freertos_kernel PRIVATE
     ### Gnu/Clang C Options
     $<$:-fdiagnostics-color=always>
     $<$:-fcolor-diagnostics>
@@ -267,7 +270,7 @@ add_compile_options(
 add_subdirectory(include)
 add_subdirectory(portable)
 
-add_library(freertos_kernel STATIC
+target_sources(freertos_kernel PRIVATE
     croutine.c
     event_groups.c
     list.c
@@ -280,6 +283,7 @@ add_library(freertos_kernel STATIC
     $>,${FREERTOS_HEAP},portable/MemMang/heap_${FREERTOS_HEAP}.c>
 )
 
+
 target_link_libraries(freertos_kernel
     PUBLIC
         freertos_kernel_port

From 54b13568e43bbe2ef57bda0e97e322d1e28fa385 Mon Sep 17 00:00:00 2001
From: Soren Ptak
Date: Mon, 17 Jul 2023 02:18:31 -0700
Subject: [PATCH 60/62] Fix the comments in the CM3 and CM4 MPU Ports about the MPU Region numbers being loaded (#707)

Co-authored-by: Soren Ptak
Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>
---
 portable/GCC/ARM_CM3/port.c         |  0
 portable/GCC/ARM_CM3_MPU/port.c     |  8 ++++----
 portable/GCC/ARM_CM4F/port.c        |  0
 portable/GCC/ARM_CM4_MPU/port.c     | 20 ++++++++++----------
 portable/IAR/ARM_CM4F_MPU/portasm.s | 16 ++++++++--------
 portable/RVDS/ARM_CM4_MPU/port.c    | 16 ++++++++--------
 6 files changed, 30 insertions(+), 30 deletions(-)
 mode change 100755 => 100644 portable/GCC/ARM_CM3/port.c
 mode change 100755 => 100644 portable/GCC/ARM_CM3_MPU/port.c
 mode change 100755 => 100644 portable/GCC/ARM_CM4F/port.c
 mode change 100755 => 100644 portable/GCC/ARM_CM4_MPU/port.c

diff --git a/portable/GCC/ARM_CM3/port.c b/portable/GCC/ARM_CM3/port.c
old mode 100755
new mode 100644
diff --git a/portable/GCC/ARM_CM3_MPU/port.c b/portable/GCC/ARM_CM3_MPU/port.c
old mode 100755
new mode 100644
index d29b31d74f5..844b2ce1f53
--- a/portable/GCC/ARM_CM3_MPU/port.c
+++ b/portable/GCC/ARM_CM3_MPU/port.c
@@ -736,8 +736,8 @@ static void prvRestoreContextOfFirstTask( void )
         "   str r3, [r0]                     \n" /* Disable MPU. */
         "                                    \n"
         "   ldr r0, =0xe000ed9c              \n" /* Region Base Address register. */
-        "   ldmia r2!, {r4-r11}              \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
-        "   stmia r0, {r4-r11}               \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+        "   ldmia r2!, {r4-r11}              \n" /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */
+        "   stmia r0, {r4-r11}               \n" /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */
         "                                    \n"
         "   ldr r0, =0xe000ed94              \n" /* MPU_CTRL register. */
         "   ldr r3, [r0]                     \n" /* Read the value of MPU_CTRL. */
@@ -1011,8 +1011,8 @@ void xPortPendSVHandler( void )
         "   str r3, [r0]                     \n" /* Disable MPU. */
         "                                    \n"
         "   ldr r0, =0xe000ed9c              \n" /* Region Base Address register. */
-        "   ldmia r2!, {r4-r11}              \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
-        "   stmia r0, {r4-r11}               \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+        "   ldmia r2!, {r4-r11}              \n" /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */
+        "   stmia r0, {r4-r11}               \n" /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */
         "                                    \n"
         "   ldr r0, =0xe000ed94              \n" /* MPU_CTRL register. */
         "   ldr r3, [r0]                     \n" /* Read the value of MPU_CTRL. */
diff --git a/portable/GCC/ARM_CM4F/port.c b/portable/GCC/ARM_CM4F/port.c
old mode 100755
new mode 100644
diff --git a/portable/GCC/ARM_CM4_MPU/port.c b/portable/GCC/ARM_CM4_MPU/port.c
old mode 100755
new mode 100644
index bbcf733b89c..548187db351
--- a/portable/GCC/ARM_CM4_MPU/port.c
+++ b/portable/GCC/ARM_CM4_MPU/port.c
@@ -818,14 +818,14 @@ static void prvRestoreContextOfFirstTask( void )
         "   str r3, [r0]                     \n" /* Disable MPU. */
         "                                    \n"
         "   ldr r0, =0xe000ed9c              \n" /* Region Base Address register. */
-        "   ldmia r2!, {r4-r11}              \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
-        "   stmia r0, {r4-r11}               \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+        "   ldmia r2!, {r4-r11}              \n" /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */
+        "   stmia r0, {r4-r11}               \n" /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */
         "                                    \n"
         #if ( configTOTAL_MPU_REGIONS == 16 )
-            "   ldmia r2!, {r4-r11}          \n" /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
-            "   stmia r0, {r4-r11}           \n" /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
-            "   ldmia r2!, {r4-r11}          \n" /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
-            "   stmia r0, {r4-r11}           \n" /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
+            "   ldmia r2!, {r4-r11}          \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 8]. */
+            "   stmia r0, {r4-r11}           \n" /* Write 4 sets of MPU registers. [MPU Region # 4 - 8]. */
+            "   ldmia r2!, {r4-r11}          \n" /* Read 4 sets of MPU registers [MPU Region # 9 - 12]. */
+            "   stmia r0, {r4-r11}           \n" /* Write 4 sets of MPU registers. [MPU Region # 9 - 12]. */
         #endif /* configTOTAL_MPU_REGIONS == 16. */
         "                                    \n"
         "   ldr r0, =0xe000ed94              \n" /* MPU_CTRL register. */
@@ -1137,14 +1137,14 @@ void xPortPendSVHandler( void )
         "   str r3, [r0]                     \n" /* Disable MPU. */
         "                                    \n"
         "   ldr r0, =0xe000ed9c              \n" /* Region Base Address register. */
-        "   ldmia r2!, {r4-r11}              \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
-        "   stmia r0, {r4-r11}               \n" /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+        "   ldmia r2!, {r4-r11}              \n" /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */
+        "   stmia r0, {r4-r11}               \n" /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */
         "                                    \n"
         #if ( configTOTAL_MPU_REGIONS == 16 )
+            "   ldmia r2!, {r4-r11}          \n" /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+            "   stmia r0, {r4-r11}           \n" /* Write 4 sets of MPU registers. [MPU Region # 4 - 7]. */
             "   ldmia r2!, {r4-r11}          \n" /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
             "   stmia r0, {r4-r11}           \n" /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
-            "   ldmia r2!, {r4-r11}          \n" /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
-            "   stmia r0, {r4-r11}           \n" /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
         #endif /* configTOTAL_MPU_REGIONS == 16. */
         "                                    \n"
         "   ldr r0, =0xe000ed94              \n" /* MPU_CTRL register. */
diff --git a/portable/IAR/ARM_CM4F_MPU/portasm.s b/portable/IAR/ARM_CM4F_MPU/portasm.s
index 3cbe5e0f5ed..a0cf8baa2f4 100644
--- a/portable/IAR/ARM_CM4F_MPU/portasm.s
+++ b/portable/IAR/ARM_CM4F_MPU/portasm.s
@@ -114,15 +114,15 @@ xPortPendSVHandler:
     str r3, [r0]                    /* Disable MPU. */
 
     ldr r0, =0xe000ed9c             /* Region Base Address register. */
-    ldmia r2!, {r4-r11}             /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
-    stmia r0, {r4-r11}              /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+    ldmia r2!, {r4-r11}             /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */
+    stmia r0, {r4-r11}              /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */
 
 #ifdef configTOTAL_MPU_REGIONS
     #if ( configTOTAL_MPU_REGIONS == 16 )
+        ldmia r2!, {r4-r11}         /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+        stmia r0, {r4-r11}          /* Write 4 sets of MPU registers. [MPU Region # 4 - 7]. */
         ldmia r2!, {r4-r11}         /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
         stmia r0, {r4-r11}          /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
-        ldmia r2!, {r4-r11}         /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
-        stmia r0, {r4-r11}          /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
     #endif /* configTOTAL_MPU_REGIONS == 16. */
 #endif
@@ -240,15 +240,15 @@ vPortRestoreContextOfFirstTask:
     str r3, [r0]                    /* Disable MPU. */
 
     ldr r0, =0xe000ed9c             /* Region Base Address register. */
-    ldmia r2!, {r4-r11}             /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
-    stmia r0, {r4-r11}              /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+    ldmia r2!, {r4-r11}             /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */
+    stmia r0, {r4-r11}              /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */
 
 #ifdef configTOTAL_MPU_REGIONS
     #if ( configTOTAL_MPU_REGIONS == 16 )
+        ldmia r2!, {r4-r11}         /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+        stmia r0, {r4-r11}          /* Write 4 sets of MPU registers. [MPU Region # 4 - 7]. */
         ldmia r2!, {r4-r11}         /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
         stmia r0, {r4-r11}          /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
-        ldmia r2!, {r4-r11}         /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
-        stmia r0, {r4-r11}          /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
     #endif /* configTOTAL_MPU_REGIONS == 16. */
 #endif
diff --git a/portable/RVDS/ARM_CM4_MPU/port.c b/portable/RVDS/ARM_CM4_MPU/port.c
index beb52ea22ed..7bcf6bc8813 100755
--- a/portable/RVDS/ARM_CM4_MPU/port.c
+++ b/portable/RVDS/ARM_CM4_MPU/port.c
@@ -796,14 +796,14 @@ __asm void prvRestoreContextOfFirstTask( void )
     str r3, [r0]                    /* Disable MPU. */
 
     ldr r0, =0xe000ed9c             /* Region Base Address register. */
-    ldmia r2!, {r4-r11}             /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
-    stmia r0, {r4-r11}              /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+    ldmia r2!, {r4-r11}             /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */
+    stmia r0, {r4-r11}              /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */
 
     #if ( configTOTAL_MPU_REGIONS == 16 )
+        ldmia r2!, {r4-r11}         /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+        stmia r0, {r4-r11}          /* Write 4 sets of MPU registers. [MPU Region # 4 - 7]. */
         ldmia r2!, {r4-r11}         /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
         stmia r0, {r4-r11}          /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
-        ldmia r2!, {r4-r11}         /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
-        stmia r0, {r4-r11}          /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
     #endif /* configTOTAL_MPU_REGIONS == 16. */
 
     ldr r0, =0xe000ed94             /* MPU_CTRL register. */
@@ -1132,14 +1132,14 @@ __asm void xPortPendSVHandler( void )
     str r3, [r0]                    /* Disable MPU. */
 
     ldr r0, =0xe000ed9c             /* Region Base Address register. */
-    ldmia r2!, {r4-r11}             /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
-    stmia r0, {r4-r11}              /* Write 4 sets of MPU registers [MPU Region # 4 - 7]. */
+    ldmia r2!, {r4-r11}             /* Read 4 sets of MPU registers [MPU Region # 0 - 3]. */
+    stmia r0, {r4-r11}              /* Write 4 sets of MPU registers [MPU Region # 0 - 3]. */
 
     #if ( configTOTAL_MPU_REGIONS == 16 )
+        ldmia r2!, {r4-r11}         /* Read 4 sets of MPU registers [MPU Region # 4 - 7]. */
+        stmia r0, {r4-r11}          /* Write 4 sets of MPU registers. [MPU Region # 4 - 7]. */
         ldmia r2!, {r4-r11}         /* Read 4 sets of MPU registers [MPU Region # 8 - 11]. */
         stmia r0, {r4-r11}          /* Write 4 sets of MPU registers. [MPU Region # 8 - 11]. */
-        ldmia r2!, {r4-r11}         /* Read 4 sets of MPU registers [MPU Region # 12 - 15]. */
-        stmia r0, {r4-r11}          /* Write 4 sets of MPU registers. [MPU Region # 12 - 15]. */
     #endif /* configTOTAL_MPU_REGIONS == 16. */
 
     ldr r0, =0xe000ed94             /* MPU_CTRL register. */
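
Background for the comment fix above, given here as a rough C sketch rather
than a change to the ports themselves: on ARMv7-M the MPU Region Base Address
register at 0xE000ED9C (the address loaded into r0 in the hunks) is followed
by the Region Attribute and Size register and three further RBAR/RASR alias
pairs, so a single eight-word "stmia r0, {r4-r11}" programs four regions at
once; which four depends on the region number encoded in each RBAR value.
The layout below is only an illustration of that register block, not code
taken from the ports.

#include <stdint.h>

typedef struct MPU_REGION_PAIR
{
    volatile uint32_t RBAR; /* Region Base Address register (region number + VALID bit). */
    volatile uint32_t RASR; /* Region Attribute and Size register. */
} MPU_REGION_PAIR_t;

/* Starting at 0xE000ED9C: RBAR/RASR plus the _A1.._A3 aliases, i.e. the four
 * pairs written by one stmia of r4-r11 in the port code above. */
typedef struct MPU_REGION_ALIAS_BLOCK
{
    MPU_REGION_PAIR_t xPairs[ 4 ];
} MPU_REGION_ALIAS_BLOCK_t;
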
From c3dc20fdb4f15115fcaf8ad3f71fdf5696a30415 Mon Sep 17 00:00:00 2001
From: chinglee-iot <61685396+chinglee-iot@users.noreply.github.com>
Date: Mon, 17 Jul 2023 18:42:05 +0800
Subject: [PATCH 61/62] Update xSemaphoreGetStaticBuffer prototype in comment (#704)

Co-authored-by: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>
---
 include/semphr.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/include/semphr.h b/include/semphr.h
index 740be5a5d12..e93b708c51c 100644
--- a/include/semphr.h
+++ b/include/semphr.h
@@ -1193,7 +1193,8 @@ typedef QueueHandle_t SemaphoreHandle_t;
 /**
  * semphr.h
  * @code{c}
- * BaseType_t xSemaphoreGetStaticBuffer( SemaphoreHandle_t xSemaphore );
+ * BaseType_t xSemaphoreGetStaticBuffer( SemaphoreHandle_t xSemaphore,
+ *                                       StaticSemaphore_t ** ppxSemaphoreBuffer );
  * @endcode
  *
  * Retrieve pointer to a statically created binary semaphore, counting semaphore,

From 71662b5b5abf4c83974de20c9f6d38636ac930b5 Mon Sep 17 00:00:00 2001
From: Gaurav-Aggarwal-AWS <33462878+aggarg@users.noreply.github.com>
Date: Tue, 18 Jul 2023 11:09:52 +0530
Subject: [PATCH 62/62] Correct the misspelled name (#708)

Signed-off-by: Gaurav Aggarwal
---
 History.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/History.txt b/History.txt
index 1bf7ea045dc..98f9822d454 100644
--- a/History.txt
+++ b/History.txt
@@ -61,7 +61,7 @@ Changes between FreeRTOS V10.5.1 and FreeRTOS 10.6.0 released July 13, 2023
     China.
   - Xinwen Fu of Department of Computer Science, University of
     Massachusetts Lowell, USA.
-  - Yuequi Chen, Zicheng Wang, Minghao Lin of University of Colorado
+  - Yueqi Chen, Zicheng Wang, Minghao Lin of University of Colorado
     Boulder, USA.
 + Add Cortex-M35P port. Contributed by @urutva.
 + Add embedded extension (RV32E) support to the IAR RISC-V port.
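
A short usage sketch matching the corrected xSemaphoreGetStaticBuffer()
prototype shown in PATCH 61, assuming configSUPPORT_STATIC_ALLOCATION == 1:
it retrieves the application supplied StaticSemaphore_t so the buffer can be
reused once the semaphore is deleted.

#include "FreeRTOS.h"
#include "semphr.h"

static StaticSemaphore_t xSemaphoreStorage;

void vCreateUseAndRecycleSemaphore( void )
{
    SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinaryStatic( &xSemaphoreStorage );
    StaticSemaphore_t * pxBuffer = NULL;

    if( ( xSemaphore != NULL ) &&
        ( xSemaphoreGetStaticBuffer( xSemaphore, &pxBuffer ) == pdTRUE ) )
    {
        /* pxBuffer now points at xSemaphoreStorage.  After the semaphore is
         * deleted the buffer can back another statically created object. */
        vSemaphoreDelete( xSemaphore );
    }
}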